Dataset schema (column name, type, observed range):

    code                     string   (lengths 86 to 54.5k)
    code_codestyle           int64    (0 to 371)
    style_context            string   (lengths 87 to 49.2k)
    style_context_codestyle  int64    (0 to 349)
    label                    int64    (0 to 1)
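A minimal sketch of how a dataset with this schema could be loaded and inspected, assuming it is published on the Hugging Face Hub; the repo id below is a placeholder, not the actual dataset path:

from datasets import load_dataset

# Placeholder repo id -- substitute the real dataset path.
ds = load_dataset("some-user/code-style-pairs", split="train")

row = ds[0]
print(row["code"][:200])           # flattened source string, 86 to 54.5k chars
print(row["code_codestyle"])       # integer style id in [0, 371]
print(row["style_context"][:200])  # second source string, 87 to 49.2k chars
print(row["label"])                # binary label, 0 or 1

The rows below follow the column order of the schema: code, code_codestyle, style_context, style_context_codestyle, label.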
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''bigcode/gpt_bigcode-santacoder''': '''https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "gpt_bigcode" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = { "hidden_size": "n_embd", "max_position_embeddings": "n_positions", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=5_0257, lowerCAmelCase__=1024, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=None, lowerCAmelCase__="gelu_pytorch_tanh", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=1e-5, lowerCAmelCase__=0.02, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Any: snake_case_ = vocab_size snake_case_ = n_positions snake_case_ = n_embd snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_inner snake_case_ = activation_function snake_case_ = resid_pdrop snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = layer_norm_epsilon snake_case_ = initializer_range snake_case_ = scale_attn_weights snake_case_ = use_cache snake_case_ = attention_softmax_in_fpaa snake_case_ = scale_attention_softmax_in_fpaa snake_case_ = multi_query snake_case_ = bos_token_id snake_case_ = eos_token_id super().__init__(bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
code_codestyle: 312
"""simple docstring""" import copy import re class UpperCamelCase : SCREAMING_SNAKE_CASE_ = "hp" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = None @classmethod def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = prefix snake_case_ = defaults cls.build_naming_info() @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: if len(lowerCAmelCase__) == 0: return "" snake_case_ = None if any(char.isdigit() for char in word): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1, len(lowerCAmelCase__) + 1): snake_case_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: snake_case_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCAmelCase__): snake_case_ = '' while integer != 0: snake_case_ = chr(ord('A') + integer % 10) + s integer //= 10 return s snake_case_ = 0 while True: snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__) if sword in info["reverse_short_word"]: continue else: snake_case_ = sword break snake_case_ = short_word snake_case_ = word return short_word @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = param_name.split('_') snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name snake_case_ = ['', '_'] for separator in separators: snake_case_ = separator.join(lowerCAmelCase__) if shortname not in info["reverse_short_param"]: snake_case_ = shortname snake_case_ = param_name return shortname return param_name @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = short_name snake_case_ = param_name @classmethod def a_ ( cls) -> List[str]: if cls.NAMING_INFO is not None: return snake_case_ = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } snake_case_ = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = info @classmethod def a_ ( cls, lowerCAmelCase__) -> List[Any]: cls.build_naming_info() assert cls.PREFIX is not None snake_case_ = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue snake_case_ = cls.NAMING_INFO['short_param'][k] if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = 1 if v else 0 snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-' snake_case_ = f'{key}{sep}{v}' name.append(lowerCAmelCase__) return "_".join(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__) -> Optional[Any]: snake_case_ = repr[len(cls.PREFIX) + 1 :] if repr == "": snake_case_ = [] else: snake_case_ = repr.split('_') snake_case_ = {} for value in values: if "-" in value: snake_case_ , snake_case_ = value.split('-') else: snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__) snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__)) snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k] snake_case_ = p_v for k in cls.DEFAULTS: if k not in parameters: snake_case_ = cls.DEFAULTS[k] return parameters
style_context_codestyle: 312
label: 1
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __UpperCamelCase = datasets.logging.get_logger(__name__) __UpperCamelCase = '''\ @inproceedings{bleurt, title={BLEURT: Learning Robust Metrics for Text Generation}, author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh}, booktitle={ACL}, year={2020}, url={https://arxiv.org/abs/2004.04696} } ''' __UpperCamelCase = '''\ BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better). See the project\'s README at https://github.com/google-research/bleurt#readme for more information. ''' __UpperCamelCase = ''' BLEURT score. Args: `predictions` (list of str): prediction/candidate sentences `references` (list of str): reference sentences `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None. Returns: \'scores\': List of scores. Examples: >>> predictions = ["hello there", "general kenobi"] >>> references = ["hello there", "general kenobi"] >>> bleurt = datasets.load_metric("bleurt") >>> results = bleurt.compute(predictions=predictions, references=references) >>> print([round(v, 2) for v in results["scores"]]) [1.03, 1.04] ''' __UpperCamelCase = { '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''', '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''', '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''', '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''', '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''', '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''', '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''', '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''', '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''', '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): def a_ ( self) -> Any: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, homepage='https://github.com/google-research/bleurt', inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Value('string', id='sequence'), 'references': datasets.Value('string', id='sequence'), }), codebase_urls=['https://github.com/google-research/bleurt'], reference_urls=['https://github.com/google-research/bleurt', 'https://arxiv.org/abs/2004.04696'], ) def a_ ( self, lowerCAmelCase__) -> Any: # check that config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( 'Using default BLEURT-Base checkpoint for sequence maximum length 128. 
' 'You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').') snake_case_ = 'bleurt-base-128' if self.config_name.lower() in CHECKPOINT_URLS: snake_case_ = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: snake_case_ = self.config_name.upper() else: raise KeyError( f'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}') # download the model checkpoint specified by self.config_name and set up the scorer snake_case_ = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name]) snake_case_ = score.BleurtScorer(os.path.join(lowerCAmelCase__, lowerCAmelCase__)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = self.scorer.score(references=lowerCAmelCase__, candidates=lowerCAmelCase__) return {"scores": scores}
code_codestyle: 312
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = str(UpperCAmelCase ) dataset_info.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfo.from_directory(UpperCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) ) def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCAmelCase ) snake_case_ = yaml.safe_load(UpperCAmelCase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase ( ) -> Optional[Any]: snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = str(UpperCAmelCase ) dataset_infos_dict.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in 
dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
style_context_codestyle: 312
label: 1
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase = 1000 ) -> int: return sum(e for e in range(3 , UpperCAmelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F"""{solution() = }""")
code_codestyle: 312
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE_ = frozenset([] ) def a_ ( self) -> Any: torch.manual_seed(0) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, ) snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__) torch.manual_seed(0) snake_case_ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0) snake_case_ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, ) snake_case_ = CLIPTextModel(lowerCAmelCase__) snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') snake_case_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) snake_case_ = image.cpu().permute(0, 2, 3, 1)[0] snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64)) snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64)) if str(lowerCAmelCase__).startswith('mps'): snake_case_ = torch.manual_seed(lowerCAmelCase__) else: snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a_ ( self) -> Dict: snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = 
StableDiffusionInpaintPipeline(**lowerCAmelCase__) snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs(lowerCAmelCase__) snake_case_ = sd_pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self) -> Union[str, Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def a_ ( self) -> Optional[int]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def a_ ( self) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = 
'stabilityai/stable-diffusion-2-inpainting' snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler') snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', ) snake_case_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
style_context_codestyle: 312
label: 1
"""simple docstring""" import argparse import os import pickle import sys import torch from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() # We do this to be able to load python 2 datasets pickles # See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918 __UpperCamelCase = data_utils.TransfoXLTokenizer __UpperCamelCase = data_utils.TransfoXLCorpus __UpperCamelCase = data_utils __UpperCamelCase = data_utils def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: if transfo_xl_dataset_file: # Convert a pre-processed corpus (see original TensorFlow repo) with open(UpperCAmelCase , 'rb' ) as fp: snake_case_ = pickle.load(UpperCAmelCase , encoding='latin1' ) # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term) snake_case_ = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file'] print(f'Save vocabulary to {pytorch_vocab_dump_path}' ) snake_case_ = corpus.vocab.__dict__ torch.save(UpperCAmelCase , UpperCAmelCase ) snake_case_ = corpus.__dict__ corpus_dict_no_vocab.pop('vocab' , UpperCAmelCase ) snake_case_ = pytorch_dump_folder_path + '/' + CORPUS_NAME print(f'Save dataset to {pytorch_dataset_dump_path}' ) torch.save(UpperCAmelCase , UpperCAmelCase ) if tf_checkpoint_path: # Convert a pre-trained TensorFlow model snake_case_ = os.path.abspath(UpperCAmelCase ) snake_case_ = os.path.abspath(UpperCAmelCase ) print(f'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' ) # Initialise PyTorch model if transfo_xl_config_file == "": snake_case_ = TransfoXLConfig() else: snake_case_ = TransfoXLConfig.from_json_file(UpperCAmelCase ) print(f'Building PyTorch model from configuration: {config}' ) snake_case_ = TransfoXLLMHeadModel(UpperCAmelCase ) snake_case_ = load_tf_weights_in_transfo_xl(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) # Save pytorch-model snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) print(f'Save PyTorch model to {os.path.abspath(UpperCAmelCase )}' ) torch.save(model.state_dict() , UpperCAmelCase ) print(f'Save configuration file to {os.path.abspath(UpperCAmelCase )}' ) with open(UpperCAmelCase , 'w' , encoding='utf-8' ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the folder to store the PyTorch model or dataset/vocab.''', ) parser.add_argument( '''--tf_checkpoint_path''', default='''''', type=str, help='''An optional path to a TensorFlow checkpoint path to be converted.''', ) parser.add_argument( '''--transfo_xl_config_file''', default='''''', type=str, help=( '''An optional config json file corresponding to the pre-trained BERT model. 
\n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--transfo_xl_dataset_file''', default='''''', type=str, help='''An optional dataset file to be converted in a vocabulary.''', ) __UpperCamelCase = parser.parse_args() convert_transfo_xl_checkpoint_to_pytorch( args.tf_checkpoint_path, args.transfo_xl_config_file, args.pytorch_dump_folder_path, args.transfo_xl_dataset_file, )
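A hypothetical invocation of the script above (the file name and all paths are placeholders):

# python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#     --pytorch_dump_folder_path ./transfo-xl-pt \
#     --tf_checkpoint_path ./transfo_xl.ckpt \
#     --transfo_xl_config_file ./transfo_xl_config.json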
code_codestyle: 312
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: return (preds == labels).mean() @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} ) SCREAMING_SNAKE_CASE_ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Set seed set_seed(training_args.seed ) try: snake_case_ = processors[data_args.task_name]() snake_case_ = processor.get_labels() snake_case_ = len(UpperCAmelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase ) -> Dict: snake_case_ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )} # Data collator snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case_ = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ = trainer.evaluate() snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase , 'w' ) as 
writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(UpperCAmelCase ) return results def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
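A hypothetical launch command for the training script above (model name, task, and paths are placeholders; "swag" is one of the task processors the companion utils_multiple_choice module typically registers):

# python run_multiple_choice.py \
#     --task_name swag \
#     --model_name_or_path bert-base-uncased \
#     --data_dir ./data/swag \
#     --output_dir ./swag-output \
#     --do_train --do_eval \
#     --max_seq_length 128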
style_context_codestyle: 312
label: 1
"""simple docstring""" import argparse import json import os import fairseq import torch from torch import nn from transformers import ( SpeechaTextaConfig, SpeechaTextaForCausalLM, SpeechaTextaTokenizer, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''post_extract_proj''': '''feature_projection.projection''', '''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''', '''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''', '''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''', '''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''', '''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''', '''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''', '''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''', '''fc2''': '''encoder.layers.*.feed_forward.output_dense''', '''final_layer_norm''': '''encoder.layers.*.final_layer_norm''', '''encoder.layer_norm''': '''encoder.layer_norm''', '''w2v_model.layer_norm''': '''feature_projection.layer_norm''', '''quantizer.weight_proj''': '''quantizer.weight_proj''', '''quantizer.vars''': '''quantizer.codevectors''', '''project_q''': '''project_q''', '''final_proj''': '''project_hid''', '''w2v_encoder.proj''': '''lm_head''', '''mask_emb''': '''masked_spec_embed''', } __UpperCamelCase = [ '''lm_head''', '''quantizer.weight_proj''', '''quantizer.codevectors''', '''project_q''', '''project_hid''', ] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: for attribute in key.split('.' ): snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ) if weight_type is not None: snake_case_ = getattr(UpperCAmelCase , UpperCAmelCase ).shape else: snake_case_ = hf_pointer.shape assert hf_shape == value.shape, ( f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be' f' {value.shape} for {full_name}' ) if weight_type == "weight": snake_case_ = value elif weight_type == "weight_g": snake_case_ = value elif weight_type == "weight_v": snake_case_ = value elif weight_type == "bias": snake_case_ = value else: snake_case_ = value logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = [] snake_case_ = fairseq_model.state_dict() snake_case_ = hf_model.feature_extractor # if encoder has different dim to decoder -> use proj_weight snake_case_ = None for name, value in fairseq_dict.items(): snake_case_ = False if "conv_layers" in name: load_conv_layer( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , hf_model.config.feat_extract_norm == 'group' , ) snake_case_ = True elif name.split('.' )[0] == "proj": snake_case_ = fairseq_model.proj snake_case_ = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: snake_case_ = True if "*" in mapped_key: snake_case_ = name.split(UpperCAmelCase )[0].split('.' 
)[-2] snake_case_ = mapped_key.replace('*' , UpperCAmelCase ) if "weight_g" in name: snake_case_ = 'weight_g' elif "weight_v" in name: snake_case_ = 'weight_v' elif "bias" in name: snake_case_ = 'bias' elif "weight" in name: snake_case_ = 'weight' else: snake_case_ = None set_recursively(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) continue if not is_used: unused_weights.append(UpperCAmelCase ) logger.warning(f'Unused weights: {unused_weights}' ) return proj_weight def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Dict: snake_case_ = full_name.split('conv_layers.' )[-1] snake_case_ = name.split('.' ) snake_case_ = int(items[0] ) snake_case_ = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was' " found." ) snake_case_ = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'{full_name} has size {value.shape}, but' f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.' ) snake_case_ = value logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' 
) else: unused_weights.append(UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Any: snake_case_ , snake_case_ = emb.weight.shape snake_case_ = nn.Linear(UpperCAmelCase , UpperCAmelCase , bias=UpperCAmelCase ) snake_case_ = emb.weight.data return lin_layer def UpperCAmelCase ( UpperCAmelCase ) -> str: with open(UpperCAmelCase , 'r' , encoding='utf-8' ) as f: snake_case_ = f.readlines() snake_case_ = [line.split(' ' )[0] for line in lines] snake_case_ = len(UpperCAmelCase ) snake_case_ = { '<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3, } vocab_dict.update(dict(zip(UpperCAmelCase , range(4 , num_words + 4 ) ) ) ) return vocab_dict @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> Union[str, Any]: snake_case_ = WavaVecaConfig.from_pretrained(UpperCAmelCase ) snake_case_ = SpeechaTextaConfig.from_pretrained( UpperCAmelCase , vocab_size=UpperCAmelCase , decoder_layers=UpperCAmelCase , do_stable_layer_norm=UpperCAmelCase ) snake_case_ = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=UpperCAmelCase , return_attention_mask=UpperCAmelCase , ) snake_case_ , snake_case_ , snake_case_ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) snake_case_ = model[0].eval() # set weights for wav2vec2 encoder snake_case_ = WavaVecaModel(UpperCAmelCase ) snake_case_ = recursively_load_weights_wavaveca(model.encoder , UpperCAmelCase ) snake_case_ = SpeechaTextaForCausalLM(UpperCAmelCase ) snake_case_ , snake_case_ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=UpperCAmelCase ) # set output linear layer unexpected_keys.remove('embed_out' ) snake_case_ = nn.Parameter(model.decoder.embed_out.detach() ) # layer norm is init to identity matrix so leaving it is fine logger.warning(f'The following keys are missing when loading the decoder weights: {missing_keys}' ) logger.warning(f'The following keys are unexpected when loading the decoder weights: {unexpected_keys}' ) snake_case_ = SpeechEncoderDecoderModel(encoder=UpperCAmelCase , decoder=UpperCAmelCase ) snake_case_ = False # add projection layer snake_case_ = nn.Parameter(projection_layer.weight ) snake_case_ = nn.Parameter(projection_layer.bias ) snake_case_ = create_vocab_dict(UpperCAmelCase ) with open(os.path.join(UpperCAmelCase , 'vocab.json' ) , 'w' ) as fp: json.dump(UpperCAmelCase , UpperCAmelCase ) snake_case_ = SpeechaTextaTokenizer(os.path.join(UpperCAmelCase , 'vocab.json' ) ) tokenizer.save_pretrained(UpperCAmelCase ) snake_case_ = hf_wavavec.config.to_dict() snake_case_ = tokenizer.pad_token_id snake_case_ = tokenizer.bos_token_id snake_case_ = tokenizer.eos_token_id snake_case_ = 'speech_to_text_2' snake_case_ = 'wav2vec2' snake_case_ = SpeechEncoderDecoderConfig.from_dict(UpperCAmelCase ) hf_wavavec.save_pretrained(UpperCAmelCase ) feature_extractor.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''') parser.add_argument( '''--encoder_config_path''', 
default='''facebook/wav2vec2-large-lv60''', type=str, help='''Path to hf encoder wav2vec2 checkpoint config''', ) parser.add_argument( '''--decoder_config_path''', default='''facebook/s2t-small-mustc-en-fr-st''', type=str, help='''Path to hf decoder s2t checkpoint config''', ) parser.add_argument('''--vocab_size''', default=1_0224, type=int, help='''Vocab size of decoder''') parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''') __UpperCamelCase = parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, vocab_size=args.vocab_size, num_decoder_layers=args.num_decoder_layers, )
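A hypothetical invocation of the conversion script above (the script file name, fairseq checkpoint, and dict paths are placeholders):

# python convert_wav2vec2_seq2seq_original_to_pytorch.py \
#     --checkpoint_path ./checkpoint_best.pt \
#     --dict_path ./dict.ltr.txt \
#     --pytorch_dump_folder_path ./wav2vec2-s2t2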
code_codestyle: 312
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
style_context_codestyle: 312
label: 1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False ) -> str: snake_case_ = 'backbone.' if is_semantic else '' snake_case_ = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'{prefix}blocks.{i}.norm1.weight', f'beit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'{prefix}blocks.{i}.norm1.bias', f'beit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append( (f'{prefix}blocks.{i}.attn.proj.weight', f'beit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append( (f'{prefix}blocks.{i}.attn.proj.bias', f'beit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'{prefix}blocks.{i}.norm2.weight', f'beit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'{prefix}blocks.{i}.norm2.bias', f'beit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.weight', f'beit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc1.bias', f'beit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.weight', f'beit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'{prefix}blocks.{i}.mlp.fc2.bias', f'beit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ (f'{prefix}cls_token', 'beit.embeddings.cls_token'), (f'{prefix}patch_embed.proj.weight', 'beit.embeddings.patch_embeddings.projection.weight'), (f'{prefix}patch_embed.proj.bias', 'beit.embeddings.patch_embeddings.projection.bias'), (f'{prefix}pos_embed', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False ) -> int: for i in range(config.num_hidden_layers ): snake_case_ = 'backbone.' 
if is_semantic else '' # queries, keys and values snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.attn.qkv.weight' ) snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.attn.q_bias' ) snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.attn.v_bias' ) snake_case_ = in_proj_weight[ : config.hidden_size, : ] snake_case_ = q_bias snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_weight[ -config.hidden_size :, : ] snake_case_ = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.gamma_1' ) snake_case_ = state_dict.pop(f'{prefix}blocks.{i}.gamma_2' ) snake_case_ = gamma_a snake_case_ = gamma_a def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = dct.pop(UpperCAmelCase ) snake_case_ = val def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> List[str]: snake_case_ = False if 'rvlcdip' in checkpoint_url else True snake_case_ = BeitConfig(use_absolute_position_embeddings=UpperCAmelCase , use_mask_token=UpperCAmelCase ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: snake_case_ = 1024 snake_case_ = 4096 snake_case_ = 24 snake_case_ = 16 # labels if "rvlcdip" in checkpoint_url: snake_case_ = 16 snake_case_ = 'huggingface/label-files' snake_case_ = 'rvlcdip-id2label.json' snake_case_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys snake_case_ = torch.hub.load_state_dict_from_url(UpperCAmelCase , map_location='cpu' )['model'] snake_case_ = create_rename_keys(UpperCAmelCase , has_lm_head=UpperCAmelCase ) for src, dest in rename_keys: rename_key(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) read_in_q_k_v(UpperCAmelCase , UpperCAmelCase , has_lm_head=UpperCAmelCase ) # load HuggingFace model snake_case_ = BeitForMaskedImageModeling(UpperCAmelCase ) if has_lm_head else BeitForImageClassification(UpperCAmelCase ) model.eval() model.load_state_dict(UpperCAmelCase ) # Check outputs on an image snake_case_ = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCAmelCase ) snake_case_ = prepare_img() snake_case_ = image_processor(images=UpperCAmelCase , return_tensors='pt' ) snake_case_ = encoding['pixel_values'] snake_case_ = model(UpperCAmelCase ) snake_case_ = outputs.logits # verify logits snake_case_ = [1, 16] if 'rvlcdip' in checkpoint_url else [1, 196, 8192] assert logits.shape == torch.Size(UpperCAmelCase ), "Shape of logits not as expected" Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'Saving model to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCAmelCase ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(UpperCAmelCase ) if push_to_hub: if has_lm_head: snake_case_ = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: snake_case_ = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' 
image_processor.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase , ) model.push_to_hub( repo_path_or_name=Path(UpperCAmelCase , UpperCAmelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=UpperCAmelCase , ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_url''', default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''', type=str, help='''URL to the original PyTorch checkpoint (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', ) __UpperCamelCase = parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
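A hypothetical invocation of the DiT conversion above (the script file name and output path are placeholders; the checkpoint URL is the script's default):

# python convert_dit_unilm_to_pytorch.py \
#     --checkpoint_url "https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth" \
#     --pytorch_dump_folder_path ./dit-base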
code_codestyle: 312
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict: snake_case_ = [] for old_item in old_list: snake_case_ = old_item.replace('in_layers.0' , 'norm1' ) snake_case_ = new_item.replace('in_layers.2' , 'conv1' ) snake_case_ = new_item.replace('out_layers.0' , 'norm2' ) snake_case_ = new_item.replace('out_layers.3' , 'conv2' ) snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' ) snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]: snake_case_ = [] for old_item in old_list: snake_case_ = old_item snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' ) snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' ) snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]: assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): snake_case_ = old_checkpoint[path] snake_case_ = old_tensor.shape[0] // 3 snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3 snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 ) snake_case_ = query.reshape(UpperCAmelCase ) snake_case_ = key.reshape(UpperCAmelCase ) snake_case_ = value.reshape(UpperCAmelCase ) for path in paths: snake_case_ = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: snake_case_ = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: snake_case_ = old_checkpoint[path['old']][:, :, 0] else: snake_case_ = old_checkpoint[path['old']] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = {} snake_case_ = checkpoint['time_embed.0.weight'] snake_case_ = checkpoint['time_embed.0.bias'] snake_case_ = checkpoint['time_embed.2.weight'] snake_case_ = checkpoint['time_embed.2.bias'] snake_case_ = checkpoint['input_blocks.0.0.weight'] snake_case_ = checkpoint['input_blocks.0.0.bias'] snake_case_ = checkpoint['out.0.weight'] snake_case_ = checkpoint['out.0.bias'] snake_case_ = checkpoint['out.2.weight'] snake_case_ = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the middle blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the output blocks only snake_case_ = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } for i in range(1 , UpperCAmelCase ): snake_case_ = (i - 1) // (config['num_res_blocks'] + 1) snake_case_ = (i - 1) % (config['num_res_blocks'] + 1) snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.weight' ] snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase ) if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , ) snake_case_ = middle_blocks[0] snake_case_ = middle_blocks[1] snake_case_ = middle_blocks[2] snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase ) for i in range(UpperCAmelCase ): snake_case_ = i // (config['num_res_blocks'] + 1) snake_case_ = i % (config['num_res_blocks'] + 1) snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]] snake_case_ = {} for layer in output_block_layers: snake_case_ , snake_case_ = layer.split('.' 
)[0], shave_segments(UpperCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCAmelCase ) else: snake_case_ = [layer_name] if len(UpperCAmelCase ) > 1: snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(UpperCAmelCase ) == 2: snake_case_ = [] if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , ) else: snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] ) snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] ) snake_case_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: __UpperCamelCase = json.loads(f.read()) __UpperCamelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __UpperCamelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: 
E722
    model.save_pretrained(args.dump_path)
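# --- Hedged illustration (not part of the original file): the fused-qkv split
# performed by assign_to_checkpoint above, with made-up sizes. All variable
# names here are local to this sketch.
import torch

num_heads = 4
channels = 8  # per-projection width, so the fused tensor has 3 * channels rows
qkv_weight = torch.randn(3 * channels, channels)  # hypothetical fused qkv projection

# Group rows so q, k and v for each head sit contiguously along dim=1, mirroring
# old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:]).
grouped = qkv_weight.reshape((num_heads, 3 * channels // num_heads) + qkv_weight.shape[1:])
query, key, value = grouped.split(channels // num_heads, dim=1)

# Each projection flattens back to (channels, channels), as target_shape does above.
target_shape = (-1, channels)
assert query.reshape(target_shape).shape == (channels, channels)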
312
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json''', # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "gpt_neox" def __init__( self, lowerCAmelCase__=5_0432, lowerCAmelCase__=6144, lowerCAmelCase__=44, lowerCAmelCase__=64, lowerCAmelCase__=2_4576, lowerCAmelCase__="gelu", lowerCAmelCase__=0.25, lowerCAmelCase__=1_0000, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__=2048, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__=True, lowerCAmelCase__=0, lowerCAmelCase__=2, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Any: super().__init__(bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = rotary_pct snake_case_ = rotary_emb_base snake_case_ = attention_dropout snake_case_ = hidden_dropout snake_case_ = classifier_dropout snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = use_cache snake_case_ = tie_word_embeddings snake_case_ = use_parallel_residual snake_case_ = rope_scaling self._rope_scaling_validation() if self.hidden_size % self.num_attention_heads != 0: raise ValueError( 'The hidden size is not divisble by the number of attention heads! Make sure to update them!') def a_ ( self) -> Tuple: if self.rope_scaling is None: return if not isinstance(self.rope_scaling, lowerCAmelCase__) or len(self.rope_scaling) != 2: raise ValueError( '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, ' f'got {self.rope_scaling}') snake_case_ = self.rope_scaling.get('type', lowerCAmelCase__) snake_case_ = self.rope_scaling.get('factor', lowerCAmelCase__) if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]: raise ValueError( f'`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}') if rope_scaling_factor is None or not isinstance(lowerCAmelCase__, lowerCAmelCase__) or rope_scaling_factor <= 1.0: raise ValueError(f'`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}')
312
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def UpperCAmelCase ( UpperCAmelCase ) -> Dict: # vision encoder if "img_encoder.pos_embed" in name: snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: snake_case_ = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: snake_case_ = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: snake_case_ = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: snake_case_ = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: snake_case_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: snake_case_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: snake_case_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: snake_case_ = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: snake_case_ = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: snake_case_ = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(UpperCAmelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' 
) snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] ) snake_case_ = config.vision_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ = int(key_split[3] ) snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = rename_key(UpperCAmelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ = val.squeeze_() else: snake_case_ = val return orig_state_dict def UpperCAmelCase ( ) -> Any: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int: snake_case_ = GroupViTConfig() snake_case_ = GroupViTModel(UpperCAmelCase ).eval() snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model'] snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0) # verify result snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) snake_case_ = prepare_img() snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase ) if model_name == "groupvit-gcc-yfcc": snake_case_ = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(f'Model name {model_name} not supported.' ) assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 ) processor.save_pretrained(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) print('Successfully saved processor and model to' , UpperCAmelCase ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(UpperCAmelCase , organization='nielsr' ) model.push_to_hub(UpperCAmelCase , organization='nielsr' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gccy-fcc''', type=str, help='''Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) __UpperCamelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
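# --- Hedged illustration (not part of the original file): the qkv/in_proj
# handling in convert_state_dict above reduces to slicing a stacked projection
# into thirds. Toy sizes; names are local to this sketch.
import torch

dim = 6  # stands in for config.vision_config.hidden_size
qkv = torch.randn(3 * dim, dim)  # stacked [query; key; value] weight

query, key, value = qkv[:dim, :], qkv[dim : dim * 2, :], qkv[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)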
312
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __UpperCamelCase = { '''configuration_swiftformer''': [ '''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwiftFormerConfig''', '''SwiftFormerOnnxConfig''', ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''SwiftFormerForImageClassification''', '''SwiftFormerModel''', '''SwiftFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_swiftformer import ( SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, SwiftFormerConfig, SwiftFormerOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swiftformer import ( SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, SwiftFormerForImageClassification, SwiftFormerModel, SwiftFormerPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
1
"""simple docstring""" import os import sys import unittest __UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __UpperCamelCase = os.path.join('''tests''', '''models''', '''bert''', '''test_modeling_bert.py''') __UpperCamelCase = os.path.join('''tests''', '''models''', '''blip''', '''test_modeling_blip.py''') class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Dict: snake_case_ = get_test_to_tester_mapping(lowerCAmelCase__) snake_case_ = get_test_to_tester_mapping(lowerCAmelCase__) snake_case_ = {'BertModelTest': 'BertModelTester'} snake_case_ = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(get_test_info.to_json(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> str: snake_case_ = get_model_to_test_mapping(lowerCAmelCase__) snake_case_ = get_model_to_test_mapping(lowerCAmelCase__) snake_case_ = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } snake_case_ = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(get_test_info.to_json(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> Union[str, Any]: snake_case_ = get_model_to_tester_mapping(lowerCAmelCase__) snake_case_ = get_model_to_tester_mapping(lowerCAmelCase__) snake_case_ = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } snake_case_ = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(get_test_info.to_json(lowerCAmelCase__), lowerCAmelCase__)
312
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]: if partitions <= 0: raise ValueError('partitions must be a positive number!' ) if partitions > number_of_bytes: raise ValueError('partitions can not > number_of_bytes!' ) snake_case_ = number_of_bytes // partitions snake_case_ = [] for i in range(UpperCAmelCase ): snake_case_ = i * bytes_per_partition + 1 snake_case_ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
312
1
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> str: snake_case_ = 'hf-internal-testing/tiny-random-t5' snake_case_ = AutoTokenizer.from_pretrained(lowerCAmelCase__) snake_case_ = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__) snake_case_ = tokenizer('This is me', return_tensors='pt') snake_case_ = model.to_bettertransformer() self.assertTrue(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules())) snake_case_ = model.generate(**lowerCAmelCase__) snake_case_ = model.reverse_bettertransformer() self.assertFalse(any('BetterTransformer' in mod.__class__.__name__ for _, mod in model.named_modules())) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__) snake_case_ = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__) self.assertFalse( any('BetterTransformer' in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())) snake_case_ = model_reloaded.generate(**lowerCAmelCase__) self.assertTrue(torch.allclose(lowerCAmelCase__, lowerCAmelCase__)) def a_ ( self) -> str: snake_case_ = 'hf-internal-testing/tiny-random-t5' snake_case_ = AutoModelForSeqaSeqLM.from_pretrained(lowerCAmelCase__) snake_case_ = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(lowerCAmelCase__): model.save_pretrained(lowerCAmelCase__) snake_case_ = model.reverse_bettertransformer() model.save_pretrained(lowerCAmelCase__)
312
"""simple docstring""" __UpperCamelCase = 256 # Modulus to hash a string __UpperCamelCase = 100_0003 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool: snake_case_ = len(UpperCAmelCase ) snake_case_ = len(UpperCAmelCase ) if p_len > t_len: return False snake_case_ = 0 snake_case_ = 0 snake_case_ = 1 # Calculating the hash of pattern and substring of text for i in range(UpperCAmelCase ): snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue snake_case_ = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash snake_case_ = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: snake_case_ = 'abc1abc12' snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc' snake_case_ = 'alskfjaldsk23adsfabcabc' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 2) snake_case_ = 'ABABX' snake_case_ = 'ABABZABABYABABX' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 3) snake_case_ = 'AAAB' snake_case_ = 'ABAAAAAB' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 4) snake_case_ = 'abcdabcy' snake_case_ = 'abcxabcdabxabcdabcdabcy' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 5) snake_case_ = 'Lü' snake_case_ = 'Lüsai' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'Lue' assert not rabin_karp(UpperCAmelCase , UpperCAmelCase ) print('Success.' ) if __name__ == "__main__": test_rabin_karp()
312
1
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCamelCase = False __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''ybelkada/fonts''' def UpperCAmelCase ( ) -> Dict: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ' 'Pix2StructImageProcessor. Please upgrade torch.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: requires_backends(UpperCAmelCase , ['torch'] ) _check_torch_version() snake_case_ = image_tensor.unsqueeze(0 ) snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 ) snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image: requires_backends(UpperCAmelCase , 'vision' ) # Add new lines so that each line is no more than 80 characters. snake_case_ = textwrap.TextWrapper(width=80 ) snake_case_ = wrapper.wrap(text=UpperCAmelCase ) snake_case_ = '\n'.join(UpperCAmelCase ) if font_bytes is not None and font_path is None: snake_case_ = io.BytesIO(UpperCAmelCase ) elif font_path is not None: snake_case_ = font_path else: snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' ) snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase ) # Create the actual image with a bit of padding around the text. 
snake_case_ = text_width + left_padding + right_padding snake_case_ = text_height + top_padding + bottom_padding snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase ) snake_case_ = ImageDraw.Draw(UpperCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase ) return image def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(UpperCAmelCase , 'vision' ) # Convert to PIL image if necessary snake_case_ = to_pil_image(UpperCAmelCase ) snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase ) snake_case_ = max(header_image.width , image.width ) snake_case_ = int(image.height * (new_width / image.width) ) snake_case_ = int(header_image.height * (new_width / header_image.width) ) snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case_ = to_numpy_array(UpperCAmelCase ) if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST: snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) return new_image class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["flattened_patches"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16} snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = max_patches snake_case_ = is_vqa def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray: requires_backends(self.extract_flattened_patches, 'torch') _check_torch_version() # convert to torch snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST) snake_case_ = torch.from_numpy(lowerCAmelCase__) snake_case_ , snake_case_ = patch_size['height'], patch_size['width'] snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__) # maximize scale s.t. 
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1) snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1) snake_case_ = max(num_feasible_rows * patch_height, 1) snake_case_ = max(num_feasible_cols * patch_width, 1) snake_case_ = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = patches.shape snake_case_ = patches_shape[1] snake_case_ = patches_shape[2] snake_case_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case_ = patches.reshape([rows * columns, depth]) # [rows * columns, 1] snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1]) snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case_ = row_ids.to(torch.floataa) snake_case_ = col_ids.to(torch.floataa) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float() snake_case_ = to_numpy_array(lowerCAmelCase__) return result def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray: if image.dtype == np.uinta: snake_case_ = image.astype(np.floataa) # take mean across the whole `image` snake_case_ = np.mean(lowerCAmelCase__) snake_case_ = np.std(lowerCAmelCase__) snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape))) return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput: snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = patch_size if patch_size is not None else self.patch_size snake_case_ = max_patches if max_patches is not None else self.max_patches snake_case_ = self.is_vqa if kwargs.get('data_format', lowerCAmelCase__) is not None: raise ValueError('data_format is not an accepted input as the outputs are ') snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images] # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.') snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__) snake_case_ = kwargs.pop('font_path', lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [header_text] * len(lowerCAmelCase__) snake_case_ = [ render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__) for i, image in enumerate(lowerCAmelCase__) ] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images] # convert to torch tensor and permute snake_case_ = [ self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__) for image in images ] # create attention mask in numpy snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images] snake_case_ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__) return encoded_outputs
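# --- Hedged illustration (not part of the original file): the resize-then-patch
# arithmetic used by extract_flattened_patches above, checked with toy numbers.
import math

patch_height = patch_width = 16
image_height, image_width = 480, 640
max_patches = 1024

# maximize scale s.t. rows * cols <= max_patches (same formula as above)
scale = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width))
rows = max(min(math.floor(scale * image_height / patch_height), max_patches), 1)
cols = max(min(math.floor(scale * image_width / patch_width), max_patches), 1)
assert rows * cols <= max_patches
print(rows, cols)  # 27 36 -> 972 patches for this example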
312
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "resnet" SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"] def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict: super().__init__(**lowerCAmelCase__) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}') snake_case_ = num_channels snake_case_ = embedding_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = layer_type snake_case_ = hidden_act snake_case_ = downsample_in_first_stage snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-3
312
1
"""simple docstring""" import os from glob import glob import imageio import torch import torchvision import wandb from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan from loaders import load_vqgan from PIL import Image from torch import nn from transformers import CLIPModel, CLIPTokenizerFast from utils import get_device, get_timestamp, show_pil class UpperCamelCase : def __init__( self, lowerCAmelCase__ = "cpu", lowerCAmelCase__ = "openai/clip-vit-large-patch14") -> None: snake_case_ = device snake_case_ = CLIPTokenizerFast.from_pretrained(lowerCAmelCase__) snake_case_ = [0.48145466, 0.4578275, 0.40821073] snake_case_ = [0.26862954, 0.26130258, 0.27577711] snake_case_ = torchvision.transforms.Normalize(self.image_mean, self.image_std) snake_case_ = torchvision.transforms.Resize(224) snake_case_ = torchvision.transforms.CenterCrop(224) def a_ ( self, lowerCAmelCase__) -> List[Any]: snake_case_ = self.resize(lowerCAmelCase__) snake_case_ = self.center_crop(lowerCAmelCase__) snake_case_ = self.normalize(lowerCAmelCase__) return images def __call__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> Dict: snake_case_ = self.tokenizer(text=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self.preprocess_img(lowerCAmelCase__) snake_case_ = {key: value.to(self.device) for (key, value) in encoding.items()} return encoding class UpperCamelCase ( nn.Module ): def __init__( self, lowerCAmelCase__=10, lowerCAmelCase__=0.01, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__="image", lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=False, ) -> None: super().__init__() snake_case_ = None snake_case_ = device if device else get_device() if vqgan: snake_case_ = vqgan else: snake_case_ = load_vqgan(self.device, conf_path=lowerCAmelCase__, ckpt_path=lowerCAmelCase__) self.vqgan.eval() if clip: snake_case_ = clip else: snake_case_ = CLIPModel.from_pretrained('openai/clip-vit-base-patch32') self.clip.to(self.device) snake_case_ = ProcessorGradientFlow(device=self.device) snake_case_ = iterations snake_case_ = lr snake_case_ = log snake_case_ = make_grid snake_case_ = return_val snake_case_ = quantize snake_case_ = self.vqgan.decoder.z_shape def a_ ( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=5, lowerCAmelCase__=True) -> List[str]: snake_case_ = [] if output_path is None: snake_case_ = './animation.gif' if input_path is None: snake_case_ = self.save_path snake_case_ = sorted(glob(input_path + '/*')) if not len(lowerCAmelCase__): raise ValueError( 'No images found in save path, aborting (did you pass save_intermediate=True to the generate' ' function?)') if len(lowerCAmelCase__) == 1: print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)') snake_case_ = total_duration / len(lowerCAmelCase__) snake_case_ = [frame_duration] * len(lowerCAmelCase__) if extend_frames: snake_case_ = 1.5 snake_case_ = 3 for file_name in paths: if file_name.endswith('.png'): images.append(imageio.imread(lowerCAmelCase__)) imageio.mimsave(lowerCAmelCase__, lowerCAmelCase__, duration=lowerCAmelCase__) print(f'gif saved to {output_path}') def a_ ( self, lowerCAmelCase__=None, lowerCAmelCase__=None) -> List[str]: if not (path or img): raise ValueError('Input either path or tensor') if img is not None: raise 
NotImplementedError snake_case_ = preprocess(Image.open(lowerCAmelCase__), target_image_size=256).to(self.device) snake_case_ = preprocess_vqgan(lowerCAmelCase__) snake_case_ , *snake_case_ = self.vqgan.encode(lowerCAmelCase__) return z def a_ ( self, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = self.latent.detach().requires_grad_() snake_case_ = base_latent + transform_vector if self.quantize: snake_case_ , *snake_case_ = self.vqgan.quantize(lowerCAmelCase__) else: snake_case_ = trans_latent return self.vqgan.decode(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None) -> Optional[int]: snake_case_ = self.clip_preprocessor(text=lowerCAmelCase__, images=lowerCAmelCase__, return_tensors='pt', padding=lowerCAmelCase__) snake_case_ = self.clip(**lowerCAmelCase__) snake_case_ = clip_outputs.logits_per_image if weights is not None: snake_case_ = similarity_logits * weights return similarity_logits.sum() def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]: snake_case_ = self._get_clip_similarity(pos_prompts['prompts'], lowerCAmelCase__, weights=(1 / pos_prompts['weights'])) if neg_prompts: snake_case_ = self._get_clip_similarity(neg_prompts['prompts'], lowerCAmelCase__, weights=neg_prompts['weights']) else: snake_case_ = torch.tensor([1], device=self.device) snake_case_ = -torch.log(lowerCAmelCase__) + torch.log(lowerCAmelCase__) return loss def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: snake_case_ = torch.randn_like(self.latent, requires_grad=lowerCAmelCase__, device=self.device) snake_case_ = torch.optim.Adam([vector], lr=self.lr) for i in range(self.iterations): optim.zero_grad() snake_case_ = self._add_vector(lowerCAmelCase__) snake_case_ = loop_post_process(lowerCAmelCase__) snake_case_ = self._get_CLIP_loss(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) print('CLIP loss', lowerCAmelCase__) if self.log: wandb.log({'CLIP Loss': clip_loss}) clip_loss.backward(retain_graph=lowerCAmelCase__) optim.step() if self.return_val == "image": yield custom_to_pil(transformed_img[0]) else: yield vector def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]: wandb.init(reinit=lowerCAmelCase__, project='face-editor') wandb.config.update({'Positive Prompts': positive_prompts}) wandb.config.update({'Negative Prompts': negative_prompts}) wandb.config.update({'lr': self.lr, 'iterations': self.iterations}) if image_path: snake_case_ = Image.open(lowerCAmelCase__) snake_case_ = image.resize((256, 256)) wandb.log('Original Image', wandb.Image(lowerCAmelCase__)) def a_ ( self, lowerCAmelCase__) -> Union[str, Any]: if not prompts: return [] snake_case_ = [] snake_case_ = [] if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [prompt.strip() for prompt in prompts.split('|')] for prompt in prompts: if isinstance(lowerCAmelCase__, (tuple, list)): snake_case_ = prompt[0] snake_case_ = float(prompt[1]) elif ":" in prompt: snake_case_ , snake_case_ = prompt.split(':') snake_case_ = float(lowerCAmelCase__) else: snake_case_ = prompt snake_case_ = 1.0 processed_prompts.append(lowerCAmelCase__) weights.append(lowerCAmelCase__) return { "prompts": processed_prompts, "weights": torch.tensor(lowerCAmelCase__, device=self.device), } def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=None, ) -> Any: if image_path: snake_case_ = 
self._get_latent(lowerCAmelCase__) else: snake_case_ = torch.randn(self.latent_dim, device=self.device) if self.log: self._init_logging(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) assert pos_prompts, "You must provide at least one positive prompt." snake_case_ = self.process_prompts(lowerCAmelCase__) snake_case_ = self.process_prompts(lowerCAmelCase__) if save_final and save_path is None: snake_case_ = os.path.join('./outputs/', '_'.join(pos_prompts['prompts'])) if not os.path.exists(lowerCAmelCase__): os.makedirs(lowerCAmelCase__) else: snake_case_ = save_path + '_' + get_timestamp() os.makedirs(lowerCAmelCase__) snake_case_ = save_path snake_case_ = self.vqgan.decode(self.latent)[0] if show_intermediate: print('Original Image') show_pil(custom_to_pil(lowerCAmelCase__)) snake_case_ = loop_post_process(lowerCAmelCase__) for iter, transformed_img in enumerate(self._optimize_CLIP(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__)): if show_intermediate: show_pil(lowerCAmelCase__) if save_intermediate: transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}.png')) if self.log: wandb.log({'Image': wandb.Image(lowerCAmelCase__)}) if show_final: show_pil(lowerCAmelCase__) if save_final: transformed_img.save(os.path.join(self.save_path, f'iter_{iter:03d}_final.png'))
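# --- Hedged illustration (not part of the original file): the three prompt
# formats accepted by process_prompts above, re-implemented standalone. Names
# are local to this sketch.
def parse_prompt(prompt):
    if isinstance(prompt, (tuple, list)):  # ('text', weight) pairs
        return prompt[0], float(prompt[1])
    if ':' in prompt:  # 'text:weight' strings
        text, weight = prompt.split(':')
        return text, float(weight)
    return prompt, 1.0  # bare strings default to weight 1.0

print(parse_prompt('a smiling face:2'))         # ('a smiling face', 2.0)
print(parse_prompt(('short hair', 0.5)))        # ('short hair', 0.5)
print(parse_prompt('photorealistic portrait'))  # ('photorealistic portrait', 1.0)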
312
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> int: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>') tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = 'this is a test' snake_case_ = 'this is a test' return input_text, output_text def a_ ( self) -> Optional[int]: snake_case_ = '<pad>' snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], '<pad>') self.assertEqual(vocab_keys[1], '<unk>') self.assertEqual(vocab_keys[-1], '[PAD]') self.assertEqual(len(lowerCAmelCase__), 3_0001) def a_ ( self) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size, 3_0000) def a_ ( self) -> List[str]: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> str: pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> List[Any]: pass def a_ ( self) -> str: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> List[Any]: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Any: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = 'This is a test' snake_case_ = [13, 1, 4398, 25, 21, 1289] snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = DebertaVaTokenizer(lowerCAmelCase__) snake_case_ = tokenizer.encode('sequence builders') snake_case_ = tokenizer.encode('multi-sequence build') snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, ) @slow def a_ ( self) -> Union[str, Any]: # fmt: off snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
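# --- Hedged usage sketch (not part of the original file): the special-token
# layout asserted by the sequence-builders test above, using the upstream
# tokenizer class name. Single sequence: [CLS] A [SEP]; pair: [CLS] A [SEP] B [SEP].
from transformers import DebertaV2Tokenizer

tok = DebertaV2Tokenizer.from_pretrained('microsoft/deberta-v2-xlarge')
a = tok.encode('sequence builders', add_special_tokens=False)
b = tok.encode('multi-sequence build', add_special_tokens=False)
assert tok.build_inputs_with_special_tokens(a) == [tok.cls_token_id] + a + [tok.sep_token_id]
assert tok.build_inputs_with_special_tokens(a, b) == (
    [tok.cls_token_id] + a + [tok.sep_token_id] + b + [tok.sep_token_id]
)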
312
1
"""simple docstring""" import json import os import tempfile from unittest.mock import patch import torch from torch.utils.data import DataLoader, TensorDataset from accelerate import DistributedType, infer_auto_device_map, init_empty_weights from accelerate.accelerator import Accelerator from accelerate.state import GradientState, PartialState from accelerate.test_utils import require_bnb, require_multi_gpu, slow from accelerate.test_utils.testing import AccelerateTestCase, require_cuda from accelerate.utils import patch_environment def UpperCAmelCase ( ) -> List[str]: snake_case_ = torch.nn.Linear(2 , 4 ) snake_case_ = torch.optim.AdamW(model.parameters() , lr=1.0 ) snake_case_ = torch.optim.lr_scheduler.OneCycleLR(UpperCAmelCase , max_lr=0.01 , steps_per_epoch=2 , epochs=1 ) snake_case_ = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) ) snake_case_ = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) ) return model, optimizer, scheduler, train_dl, valid_dl def UpperCAmelCase ( UpperCAmelCase ) -> int: return (model.weight.abs().sum() + model.bias.abs().sum()).item() def UpperCAmelCase ( UpperCAmelCase ) -> List[Any]: snake_case_ = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict() model.load_state_dict(UpperCAmelCase ) class UpperCamelCase ( lowerCAmelCase__ ): @require_cuda def a_ ( self) -> List[str]: snake_case_ = Accelerator() assert PartialState._shared_state["_cpu"] is False assert PartialState._shared_state["device"].type == "cuda" with self.assertRaises(lowerCAmelCase__): snake_case_ = Accelerator(cpu=lowerCAmelCase__) def a_ ( self) -> Optional[int]: snake_case_ = Accelerator() snake_case_ = GradientState() assert state.num_steps == 1 snake_case_ = 4 assert state.num_steps == 4 assert state.sync_gradients is True snake_case_ = False assert state.sync_gradients is False GradientState._reset_state() def a_ ( self) -> Dict: snake_case_ = Accelerator() snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = create_components() ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = accelerator.prepare(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) self.assertTrue(prepared_model in accelerator._models) self.assertTrue(prepared_optimizer in accelerator._optimizers) self.assertTrue(prepared_scheduler in accelerator._schedulers) self.assertTrue(prepared_train_dl in accelerator._dataloaders) self.assertTrue(prepared_valid_dl in accelerator._dataloaders) def a_ ( self) -> Dict: snake_case_ = Accelerator() snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = create_components() accelerator.prepare(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) accelerator.free_memory() self.assertTrue(len(accelerator._models) == 0) self.assertTrue(len(accelerator._optimizers) == 0) self.assertTrue(len(accelerator._schedulers) == 0) self.assertTrue(len(accelerator._dataloaders) == 0) def a_ ( self) -> Optional[Any]: PartialState._reset_state() # Mock torch.cuda.set_device to avoid an exception as the device doesn't exist def noop(*lowerCAmelCase__, **lowerCAmelCase__): pass with patch('torch.cuda.set_device', lowerCAmelCase__), patch_environment(ACCELERATE_TORCH_DEVICE='cuda:64'): snake_case_ = Accelerator() self.assertEqual(str(accelerator.state.device), 'cuda:64') def a_ ( self) -> Union[str, Any]: snake_case_ = Accelerator() snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = create_components() 
accelerator.prepare(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = get_signature(lowerCAmelCase__) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(lowerCAmelCase__) # make sure random weights don't match load_random_weights(lowerCAmelCase__) self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase__)) > 1e-3) # make sure loaded weights match accelerator.load_state(lowerCAmelCase__) self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase__)) < 1e-3) def a_ ( self) -> Optional[Any]: snake_case_ = Accelerator() snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = create_components() accelerator.prepare(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = get_signature(lowerCAmelCase__) # saving hook def save_config(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__): snake_case_ = {'class_name': models[0].__class__.__name__} with open(os.path.join(lowerCAmelCase__, 'data.json'), 'w') as f: json.dump(lowerCAmelCase__, lowerCAmelCase__) # loading hook def load_config(lowerCAmelCase__, lowerCAmelCase__): with open(os.path.join(lowerCAmelCase__, 'data.json'), 'r') as f: snake_case_ = json.load(lowerCAmelCase__) snake_case_ = config['class_name'] snake_case_ = accelerator.register_save_state_pre_hook(lowerCAmelCase__) snake_case_ = accelerator.register_load_state_pre_hook(lowerCAmelCase__) with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(lowerCAmelCase__) # make sure random weights don't match with hooks load_random_weights(lowerCAmelCase__) self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase__)) > 1e-3) # random class name to verify correct one is loaded snake_case_ = 'random' # make sure loaded weights match with hooks accelerator.load_state(lowerCAmelCase__) self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase__)) < 1e-3) # mode.class_name is loaded from config self.assertTrue(model.class_name == model.__class__.__name__) # remove hooks save_hook.remove() load_hook.remove() with tempfile.TemporaryDirectory() as tmpdirname: accelerator.save_state(lowerCAmelCase__) # make sure random weights don't match with hooks removed load_random_weights(lowerCAmelCase__) self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase__)) > 1e-3) # random class name to verify correct one is loaded snake_case_ = 'random' # make sure loaded weights match with hooks removed accelerator.load_state(lowerCAmelCase__) self.assertTrue(abs(model_signature - get_signature(lowerCAmelCase__)) < 1e-3) # mode.class_name is NOT loaded from config self.assertTrue(model.class_name != model.__class__.__name__) def a_ ( self) -> Optional[Any]: snake_case_ = Accelerator() snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = create_components() snake_case_ = None # This should work snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) self.assertTrue(dummy_obj is None) def a_ ( self) -> List[str]: snake_case_ = Accelerator() snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = create_components() snake_case_ = [1, 2, 3] # This should work snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = accelerator.prepare( lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, 
lowerCAmelCase__, lowerCAmelCase__) self.assertEqual( getattr(lowerCAmelCase__, '_is_accelerate_prepared', lowerCAmelCase__), lowerCAmelCase__, 'Dummy object should have `_is_accelerate_prepared` set to `True`', ) self.assertEqual( getattr(lowerCAmelCase__, '_is_accelerate_prepared', lowerCAmelCase__), lowerCAmelCase__, 'Model is missing `_is_accelerator_prepared` or is set to `False`', ) self.assertEqual( getattr(lowerCAmelCase__, '_is_accelerate_prepared', lowerCAmelCase__), lowerCAmelCase__, 'Optimizer is missing `_is_accelerator_prepared` or is set to `False`', ) self.assertEqual( getattr(lowerCAmelCase__, '_is_accelerate_prepared', lowerCAmelCase__), lowerCAmelCase__, 'Scheduler is missing `_is_accelerator_prepared` or is set to `False`', ) self.assertEqual( getattr(lowerCAmelCase__, '_is_accelerate_prepared', lowerCAmelCase__), lowerCAmelCase__, 'Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`', ) self.assertEqual( getattr(lowerCAmelCase__, '_is_accelerate_prepared', lowerCAmelCase__), lowerCAmelCase__, 'Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`', ) @slow @require_bnb def a_ ( self) -> Optional[Any]: from transformers import AutoModelForCausalLM snake_case_ = AutoModelForCausalLM.from_pretrained( 'EleutherAI/gpt-neo-125m', load_in_abit=lowerCAmelCase__, device_map={'': 0}, ) snake_case_ = Accelerator() # This should work snake_case_ = accelerator.prepare(lowerCAmelCase__) @slow @require_bnb def a_ ( self) -> List[Any]: from transformers import AutoModelForCausalLM snake_case_ = Accelerator() with init_empty_weights(): snake_case_ = AutoModelForCausalLM.from_pretrained( 'EleutherAI/gpt-neo-125m', ) model.tie_weights() snake_case_ = infer_auto_device_map(lowerCAmelCase__) snake_case_ = 'cpu' snake_case_ = AutoModelForCausalLM.from_pretrained( 'EleutherAI/gpt-neo-125m', device_map=lowerCAmelCase__, load_in_abit=lowerCAmelCase__, llm_inta_enable_fpaa_cpu_offload=lowerCAmelCase__) # This should not work and get value error with self.assertRaises(lowerCAmelCase__): snake_case_ = accelerator.prepare(lowerCAmelCase__) @slow @require_bnb @require_multi_gpu def a_ ( self) -> int: from transformers import AutoModelForCausalLM snake_case_ = {'distributed_type': DistributedType.MULTI_GPU} with init_empty_weights(): snake_case_ = AutoModelForCausalLM.from_pretrained( 'EleutherAI/gpt-neo-125m', ) model.tie_weights() snake_case_ = infer_auto_device_map(lowerCAmelCase__) snake_case_ = 1 snake_case_ = AutoModelForCausalLM.from_pretrained( 'EleutherAI/gpt-neo-125m', load_in_abit=lowerCAmelCase__, device_map=lowerCAmelCase__, ) snake_case_ = Accelerator() # This should not work and get value error with self.assertRaises(lowerCAmelCase__): snake_case_ = accelerator.prepare(lowerCAmelCase__) PartialState._reset_state() @slow @require_bnb @require_multi_gpu def a_ ( self) -> List[str]: from transformers import AutoModelForCausalLM with init_empty_weights(): snake_case_ = AutoModelForCausalLM.from_pretrained( 'EleutherAI/gpt-neo-125m', ) snake_case_ = infer_auto_device_map(lowerCAmelCase__) snake_case_ = 1 snake_case_ = AutoModelForCausalLM.from_pretrained( 'EleutherAI/gpt-neo-125m', load_in_abit=lowerCAmelCase__, device_map=lowerCAmelCase__, ) snake_case_ = Accelerator() # This should work snake_case_ = accelerator.prepare(lowerCAmelCase__) @require_cuda def a_ ( self) -> Optional[Any]: snake_case_ = torch.nn.Linear(10, 10) snake_case_ = torch.optim.SGD(model.parameters(), lr=0.01) snake_case_ = Accelerator(cpu=lowerCAmelCase__) snake_case_ = 
accelerator.prepare(lowerCAmelCase__)
312
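Aside -- not part of the dataset row. De-obfuscated, the save/load test in the code above boils down to this round trip; a minimal sketch assuming accelerate and torch are installed (CPU is forced so no GPU is needed):

import tempfile
import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)  # force CPU so the sketch runs anywhere
model = torch.nn.Linear(2, 4)
optimizer = torch.optim.AdamW(model.parameters(), lr=1.0)
model, optimizer = accelerator.prepare(model, optimizer)

with tempfile.TemporaryDirectory() as tmpdir:
    accelerator.save_state(tmpdir)   # checkpoint model + optimizer state
    with torch.no_grad():
        model.weight.zero_()         # clobber the weights...
    accelerator.load_state(tmpdir)   # ...and restore them from the checkpoint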
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {} __UpperCamelCase = {} __UpperCamelCase = {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]: snake_case_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' ) snake_case_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' ) snake_case_ = format_type def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]: snake_case_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): snake_case_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: __UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: __UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: __UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]: if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter: snake_case_ = get_format_type_from_alias(UpperCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
312
1
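Aside -- not part of the dataset row. The style_context above registers formatters under a canonical name plus aliases and resolves aliases on lookup. A reduced sketch of that registry pattern (register_formatter and get_formatter are illustrative names, not the module's private helpers):

_FORMAT_TYPES = {}
_FORMAT_TYPES_ALIASES = {}

def register_formatter(formatter_cls, format_type, aliases=()):
    _FORMAT_TYPES[format_type] = formatter_cls
    for alias in set(list(aliases) + [format_type]):
        _FORMAT_TYPES_ALIASES[alias] = format_type  # alias -> canonical name

def get_formatter(name, **kwargs):
    format_type = _FORMAT_TYPES_ALIASES.get(name, name)
    if format_type not in _FORMAT_TYPES:
        raise ValueError(f'Unknown format type: {name!r}')
    return _FORMAT_TYPES[format_type](**kwargs)

class NumpyFormatter:  # stand-in for the real formatter classes
    pass

register_formatter(NumpyFormatter, 'numpy', aliases=['np'])
print(type(get_formatter('np')).__name__)  # alias resolves to NumpyFormatter

The real module adds one more layer: unavailable backends are registered with a stored exception that is raised on lookup instead of instantiating a formatter.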
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging __UpperCamelCase = logging.get_logger(__name__) def UpperCAmelCase ( ) -> Tuple: # Get the sagemaker specific mp parameters from smp_options variable. snake_case_ = os.getenv('SM_HP_MP_PARAMETERS' , '{}' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. snake_case_ = json.loads(UpperCAmelCase ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. snake_case_ = os.getenv('SM_FRAMEWORK_PARAMS' , '{}' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". snake_case_ = json.loads(UpperCAmelCase ) if not mpi_options.get('sagemaker_mpi_enabled' , UpperCAmelCase ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('smdistributed' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = field( default="" , metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"} , ) def a_ ( self) -> Union[str, Any]: super().__post_init__() warnings.warn( '`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ' '`TrainingArguments` instead.', lowerCAmelCase__, ) @cached_property def a_ ( self) -> "torch.device": logger.info('PyTorch: setting up devices') if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( 'torch.distributed process group is initialized, but local_rank == -1. ' 'In order to use Torch DDP, launch your script with `python -m torch.distributed.launch') if self.no_cuda: snake_case_ = torch.device('cpu') snake_case_ = 0 elif is_sagemaker_model_parallel_available(): snake_case_ = smp.local_rank() snake_case_ = torch.device('cuda', lowerCAmelCase__) snake_case_ = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='smddp', timeout=self.ddp_timeout_delta) snake_case_ = int(os.getenv('SMDATAPARALLEL_LOCAL_RANK')) snake_case_ = torch.device('cuda', self.local_rank) snake_case_ = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. GPU#1 snake_case_ = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. snake_case_ = torch.cuda.device_count() else: # Here, we'll use torch.distributed. 
# Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='nccl', timeout=self.ddp_timeout_delta) snake_case_ = torch.device('cuda', self.local_rank) snake_case_ = 1 if device.type == "cuda": torch.cuda.set_device(lowerCAmelCase__) return device @property def a_ ( self) -> Optional[int]: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def a_ ( self) -> int: return not is_sagemaker_model_parallel_available() @property def a_ ( self) -> Optional[Any]: return False
312
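Aside -- not part of the dataset row. The availability probe at the top of the code above parses a JSON blob from an environment variable; a reduced, runnable sketch (the function name is illustrative, and the real check additionally parses SM_FRAMEWORK_PARAMS and requires the smdistributed module to be importable):

import json
import os

def is_sagemaker_model_parallel_configured() -> bool:
    # SageMaker passes model-parallel settings as a JSON blob in this env var;
    # the 'partitions' field must be present for MP to be configured.
    try:
        smp_options = json.loads(os.getenv('SM_HP_MP_PARAMETERS', '{}'))
    except json.JSONDecodeError:
        return False
    return 'partitions' in smp_options

print(is_sagemaker_model_parallel_configured())  # False outside SageMaker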
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp __UpperCamelCase = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } __UpperCamelCase = { '''RUCAIBox/mvp''': 1024, } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = MvpTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]: super().__init__( lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type')) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**lowerCAmelCase__) snake_case_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case_ = 'post_processor' snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) if tokenizer_component_instance: snake_case_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case_ = tuple(state['sep']) if "cls" in state: snake_case_ = tuple(state['cls']) snake_case_ = False if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = add_prefix_space snake_case_ = True if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets: snake_case_ = trim_offsets snake_case_ = True if changes_to_apply: snake_case_ = getattr(lowerCAmelCase__, state.pop('type')) snake_case_ = component_class(**lowerCAmelCase__) setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) @property def a_ ( self) -> str: if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value snake_case_ = value def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str: snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
312
1
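Aside -- not part of the dataset row. The tokenizer above assembles special tokens BART/RoBERTa-style; a pure-Python sketch of the layout (token ids 0 and 2 stand in for <s> and </s>):

def build_inputs_with_special_tokens(ids_a, ids_b=None, bos=0, eos=2):
    output = [bos] + ids_a + [eos]
    if ids_b is None:
        return output                      # single sequence: <s> A </s>
    return output + [eos] + ids_b + [eos]  # pair: <s> A </s></s> B </s>

print(build_inputs_with_special_tokens([10, 11]))        # [0, 10, 11, 2]
print(build_inputs_with_special_tokens([10, 11], [12]))  # [0, 10, 11, 2, 2, 12, 2]

The matching create_token_type_ids_from_sequences returns all zeros, since MVP does not use token type ids.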
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=30, lowerCAmelCase__=2, lowerCAmelCase__=3, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=32, lowerCAmelCase__=2, lowerCAmelCase__=4, lowerCAmelCase__=37, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=10, lowerCAmelCase__=0.02, lowerCAmelCase__=3, lowerCAmelCase__=0.6, lowerCAmelCase__=None, ) -> Dict: snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = mask_ratio snake_case_ = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) snake_case_ = (image_size // patch_size) ** 2 snake_case_ = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def a_ ( self) -> Optional[Any]: snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size], self.type_sequence_label_size) snake_case_ = self.get_config() return config, pixel_values, labels def a_ ( self) -> int: return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, decoder_hidden_size=self.hidden_size, decoder_num_hidden_layers=self.num_hidden_layers, decoder_num_attention_heads=self.num_attention_heads, decoder_intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase__, initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> int: snake_case_ = TFViTMAEModel(config=lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, training=lowerCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = TFViTMAEForPreTraining(lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, 
training=lowerCAmelCase__) # expected sequence length = num_patches snake_case_ = (self.image_size // self.patch_size) ** 2 snake_case_ = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) # test greyscale images snake_case_ = 1 snake_case_ = TFViTMAEForPreTraining(lowerCAmelCase__) snake_case_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) snake_case_ = model(lowerCAmelCase__, training=lowerCAmelCase__) snake_case_ = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) def a_ ( self) -> Tuple: snake_case_ = self.prepare_config_and_inputs() ((snake_case_) , (snake_case_) , (snake_case_)) = config_and_inputs snake_case_ = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () SCREAMING_SNAKE_CASE_ = {"feature-extraction": TFViTMAEModel} if is_tf_available() else {} SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def a_ ( self) -> Tuple: snake_case_ = TFViTMAEModelTester(self) snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, has_text_modality=lowerCAmelCase__, hidden_size=37) def a_ ( self) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds') def a_ ( self) -> int: pass def a_ ( self) -> Tuple: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer)) snake_case_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__, tf.keras.layers.Layer)) def a_ ( self) -> str: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['pixel_values'] self.assertListEqual(arg_names[:1], lowerCAmelCase__) def a_ ( self) -> Optional[int]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__) def a_ ( self) -> List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase__) def a_ ( self) -> Optional[int]: # make the mask reproducible np.random.seed(2) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = int((config.image_size // config.patch_size) ** 2) snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, noise=lowerCAmelCase__) snake_case_ = copy.deepcopy(self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__)) snake_case_ = model(**lowerCAmelCase__, noise=lowerCAmelCase__) snake_case_ = outputs_dict[0].numpy() snake_case_ = outputs_keywords[0].numpy() 
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6) def a_ ( self) -> Union[str, Any]: # make the mask reproducible np.random.seed(2) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = int((config.image_size // config.patch_size) ** 2) snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) def prepare_numpy_arrays(lowerCAmelCase__): snake_case_ = {} for k, v in inputs_dict.items(): if tf.is_tensor(lowerCAmelCase__): snake_case_ = v.numpy() else: snake_case_ = np.array(lowerCAmelCase__) return inputs_np_dict for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = prepare_numpy_arrays(lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, noise=lowerCAmelCase__) snake_case_ = model(**lowerCAmelCase__, noise=lowerCAmelCase__) self.assert_outputs_same(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: # make masks reproducible np.random.seed(2) snake_case_ = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2) snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) snake_case_ = tf.constant(lowerCAmelCase__) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument snake_case_ = tf_noise super().check_pt_tf_models(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Optional[int]: # make mask reproducible np.random.seed(2) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__),) for module_member_name in dir(lowerCAmelCase__) if module_member_name.endswith('MainLayer') # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('MainLayer')] == model_class.__name__[: -len('Model')] for module_member in (getattr(lowerCAmelCase__, lowerCAmelCase__),) if isinstance(lowerCAmelCase__, lowerCAmelCase__) and tf.keras.layers.Layer in module_member.__bases__ and getattr(lowerCAmelCase__, '_keras_serializable', lowerCAmelCase__) } snake_case_ = int((config.image_size // config.patch_size) ** 2) snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) snake_case_ = tf.convert_to_tensor(lowerCAmelCase__) inputs_dict.update({'noise': noise}) for main_layer_class in tf_main_layer_classes: snake_case_ = main_layer_class(lowerCAmelCase__) snake_case_ = { name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items() } snake_case_ = tf.keras.Model(lowerCAmelCase__, outputs=main_layer(lowerCAmelCase__)) snake_case_ = model(lowerCAmelCase__) with tempfile.TemporaryDirectory() as tmpdirname: snake_case_ = os.path.join(lowerCAmelCase__, 'keras_model.h5') model.save(lowerCAmelCase__) snake_case_ = tf.keras.models.load_model( lowerCAmelCase__, custom_objects={main_layer_class.__name__: main_layer_class}) assert isinstance(lowerCAmelCase__, tf.keras.Model) snake_case_ = model(lowerCAmelCase__) self.assert_outputs_same(lowerCAmelCase__, lowerCAmelCase__) @slow def a_ ( self) -> str: # make mask reproducible np.random.seed(2) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = int((config.image_size // config.patch_size) ** 2) snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, noise=lowerCAmelCase__) if model_class.__name__ == "TFViTMAEModel": snake_case_ = outputs.last_hidden_state.numpy() snake_case_ = 0 else: snake_case_ = outputs.logits.numpy() snake_case_ = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowerCAmelCase__, saved_model=lowerCAmelCase__) snake_case_ = model_class.from_pretrained(lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, noise=lowerCAmelCase__) if model_class.__name__ == "TFViTMAEModel": snake_case_ = after_outputs['last_hidden_state'].numpy() snake_case_ = 0 else: snake_case_ = after_outputs['logits'].numpy() snake_case_ = 0 snake_case_ = np.amax(np.abs(out_a - out_a)) self.assertLessEqual(lowerCAmelCase__, 1e-5) def a_ ( self) -> List[str]: # make mask reproducible np.random.seed(2) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = int((config.image_size // config.patch_size) ** 2) snake_case_ = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__, noise=lowerCAmelCase__) snake_case_ = model.get_config() # make sure that returned config is jsonifiable, which is required by keras json.dumps(lowerCAmelCase__) snake_case_ = model_class.from_config(model.get_config()) # make sure it also accepts a normal config snake_case_ = model_class.from_config(model.config) snake_case_ = new_model(lowerCAmelCase__) # Build model new_model.set_weights(model.get_weights()) snake_case_ = new_model(lowerCAmelCase__, noise=lowerCAmelCase__) self.assert_outputs_same(lowerCAmelCase__, 
lowerCAmelCase__) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.') def a_ ( self) -> Dict: pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load') def a_ ( self) -> Any: pass @slow def a_ ( self) -> List[Any]: snake_case_ = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224') self.assertIsNotNone(lowerCAmelCase__) def UpperCAmelCase ( ) -> List[str]: snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def a_ ( self) -> Optional[Any]: return ViTImageProcessor.from_pretrained('facebook/vit-mae-base') if is_vision_available() else None @slow def a_ ( self) -> Dict: # make random mask reproducible across the PT and TF model np.random.seed(2) snake_case_ = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base') snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=lowerCAmelCase__, return_tensors='tf') # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) snake_case_ = ViTMAEConfig() snake_case_ = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) snake_case_ = np.random.uniform(size=(1, num_patches)) # forward pass snake_case_ = model(**lowerCAmelCase__, noise=lowerCAmelCase__) # verify the logits snake_case_ = tf.convert_to_tensor([1, 196, 768]) self.assertEqual(outputs.logits.shape, lowerCAmelCase__) snake_case_ = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]]) tf.debugging.assert_near(outputs.logits[0, :3, :3], lowerCAmelCase__, atol=1e-4)
312
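Aside -- not part of the dataset row. The ViTMAE model tester above derives its expected sequence length from the mask ratio; the arithmetic in isolation:

import math

image_size, patch_size, mask_ratio = 30, 2, 0.6  # the tester's defaults
num_patches = (image_size // patch_size) ** 2    # 15 * 15 = 225
# ViTMAE keeps (1 - mask_ratio) of the patch tokens plus the [CLS] token,
# rounded up -- the formula the model tester uses for seq_length.
seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
print(num_patches, seq_length)                   # 225 91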
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def a_ ( self, lowerCAmelCase__=0) -> List[Any]: snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__)) snake_case_ = np.random.RandomState(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def a_ ( self) -> Optional[Any]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def a_ ( self) -> List[str]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> str: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) # warmup pass to apply optimizations snake_case_ = pipe(**self.get_dummy_inputs()) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> int: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def 
a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): @property def a_ ( self) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a_ ( self) -> str: snake_case_ = ort.SessionOptions() snake_case_ = False return options def a_ ( self) -> Any: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) # using the PNDM scheduler by default snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def a_ ( self) -> List[Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) snake_case_ = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx') snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) 
snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
312
1
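Aside -- not part of the dataset row. Each test above re-instantiates the pipeline and swaps in a different scheduler built from the current scheduler's config. A minimal sketch, assuming diffusers and onnxruntime are installed and the tiny test checkpoint is reachable (class name per current diffusers, where the obfuscated Img2Img spelling above reads ImgaImg):

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionImg2ImgPipeline

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline',
    provider='CPUExecutionProvider',
)
# Schedulers are hot-swappable: rebuild a different one from the same config.
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)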
"""simple docstring""" import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __UpperCamelCase = '''0.12''' # assumed parallelism: 8 if is_torch_available(): import torch def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None ) -> Union[str, Any]: if rng is None: snake_case_ = random.Random() snake_case_ = 1 for dim in shape: total_dims *= dim snake_case_ = [] for _ in range(UpperCAmelCase ): values.append(rng.randint(0 , vocab_size - 1 ) ) snake_case_ = np.array(UpperCAmelCase , dtype=jnp.intaa ).reshape(UpperCAmelCase ) return output def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Optional[int]: snake_case_ = ids_tensor(UpperCAmelCase , vocab_size=2 , rng=UpperCAmelCase ) # make sure that at least one token is attended to for each batch snake_case_ = 1 return attn_mask @require_flax class UpperCamelCase : SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = () def a_ ( self) -> Optional[int]: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 snake_case_ = 2 snake_case_ = inputs['input_ids'].shape[-1] // 2 snake_case_ = inputs['input_ids'][:max_batch_size, :sequence_length] snake_case_ = jnp.ones_like(lowerCAmelCase__) snake_case_ = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens snake_case_ = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` snake_case_ = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def a_ ( self) -> Union[str, Any]: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() snake_case_ = False snake_case_ = max_length snake_case_ = 0 for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model_class.__name__[4:] # Skip the "Flax" at the beginning snake_case_ = getattr(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = pt_model_class(lowerCAmelCase__).eval() snake_case_ = load_flax_weights_in_pytorch_model(lowerCAmelCase__, flax_model.params) snake_case_ = flax_model.generate(lowerCAmelCase__).sequences snake_case_ = pt_model.generate(torch.tensor(lowerCAmelCase__, dtype=torch.long)) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: snake_case_ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist(), flax_generation_outputs.tolist()) def a_ ( self) -> Optional[Any]: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() snake_case_ = False snake_case_ = max_length for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def a_ ( self) -> Union[str, Any]: 
snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() snake_case_ = True snake_case_ = max_length for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def a_ ( self) -> List[Any]: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() snake_case_ = False snake_case_ = max_length snake_case_ = 2 for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def a_ ( self) -> Optional[int]: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() snake_case_ = False snake_case_ = max_length snake_case_ = 2 snake_case_ = 2 for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[0], input_ids.shape[0] * config.num_return_sequences) def a_ ( self) -> Any: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() snake_case_ = True snake_case_ = max_length snake_case_ = 0.8 snake_case_ = 10 snake_case_ = 0.3 snake_case_ = 1 snake_case_ = 8 snake_case_ = 9 for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def a_ ( self) -> Any: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() snake_case_ = max_length snake_case_ = 1 snake_case_ = 8 snake_case_ = 9 for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def a_ ( self) -> int: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() snake_case_ = max_length snake_case_ = 2 snake_case_ = 1 snake_case_ = 8 snake_case_ = 9 for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def a_ ( self) -> Dict: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() # pad attention mask on the left snake_case_ = attention_mask.at[(0, 
0)].set(0) snake_case_ = False snake_case_ = max_length for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__, attention_mask=lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__, attention_mask=lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def a_ ( self) -> Optional[int]: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() # pad attention mask on the left snake_case_ = attention_mask.at[(0, 0)].set(0) snake_case_ = True snake_case_ = max_length for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__, attention_mask=lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__, attention_mask=lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) def a_ ( self) -> Any: snake_case_ , snake_case_ , snake_case_ , snake_case_ = self._get_input_ids_and_config() # pad attention mask on the left snake_case_ = attention_mask.at[(0, 0)].set(0) snake_case_ = 2 snake_case_ = max_length for model_class in self.all_generative_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = model.generate(lowerCAmelCase__, attention_mask=lowerCAmelCase__).sequences self.assertEqual(generation_outputs.shape[-1], lowerCAmelCase__) snake_case_ = jit(model.generate) snake_case_ = jit_generate(lowerCAmelCase__, attention_mask=lowerCAmelCase__).sequences self.assertListEqual(generation_outputs.tolist(), jit_generation_outputs.tolist()) @require_flax class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Dict: snake_case_ = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert') snake_case_ = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only') snake_case_ = 'Hello world' snake_case_ = tokenizer(lowerCAmelCase__, return_tensors='np').input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(lowerCAmelCase__, 'do_samples'): model.generate(lowerCAmelCase__, do_samples=lowerCAmelCase__) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(lowerCAmelCase__, 'foo'): snake_case_ = {'foo': 'bar'} model.generate(lowerCAmelCase__, **lowerCAmelCase__)
312
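Aside -- not part of the dataset row. Nearly every test above asserts that jit-compiling generate leaves the produced sequences unchanged; the core of that check, assuming jax, flax and the tiny checkpoints named in the code above are available:

from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained('hf-internal-testing/tiny-bert')
model = FlaxAutoModelForCausalLM.from_pretrained('hf-internal-testing/tiny-bert-flax-only')
input_ids = tokenizer('Hello world', return_tensors='np').input_ids

eager = model.generate(input_ids).sequences
jitted = jit(model.generate)(input_ids).sequences  # compiled path, same result
assert eager.tolist() == jitted.tolist()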
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCamelCase = False __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''ybelkada/fonts''' def UpperCAmelCase ( ) -> Dict: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ' 'Pix2StructImageProcessor. Please upgrade torch.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: requires_backends(UpperCAmelCase , ['torch'] ) _check_torch_version() snake_case_ = image_tensor.unsqueeze(0 ) snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 ) snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image: requires_backends(UpperCAmelCase , 'vision' ) # Add new lines so that each line is no more than 80 characters. snake_case_ = textwrap.TextWrapper(width=80 ) snake_case_ = wrapper.wrap(text=UpperCAmelCase ) snake_case_ = '\n'.join(UpperCAmelCase ) if font_bytes is not None and font_path is None: snake_case_ = io.BytesIO(UpperCAmelCase ) elif font_path is not None: snake_case_ = font_path else: snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' ) snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase ) # Create the actual image with a bit of padding around the text. 
snake_case_ = text_width + left_padding + right_padding snake_case_ = text_height + top_padding + bottom_padding snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase ) snake_case_ = ImageDraw.Draw(UpperCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase ) return image def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(UpperCAmelCase , 'vision' ) # Convert to PIL image if necessary snake_case_ = to_pil_image(UpperCAmelCase ) snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase ) snake_case_ = max(header_image.width , image.width ) snake_case_ = int(image.height * (new_width / image.width) ) snake_case_ = int(header_image.height * (new_width / header_image.width) ) snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case_ = to_numpy_array(UpperCAmelCase ) if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST: snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) return new_image class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["flattened_patches"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16} snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = max_patches snake_case_ = is_vqa def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray: requires_backends(self.extract_flattened_patches, 'torch') _check_torch_version() # convert to torch snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST) snake_case_ = torch.from_numpy(lowerCAmelCase__) snake_case_ , snake_case_ = patch_size['height'], patch_size['width'] snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__) # maximize scale s.t. 
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1) snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1) snake_case_ = max(num_feasible_rows * patch_height, 1) snake_case_ = max(num_feasible_cols * patch_width, 1) snake_case_ = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = patches.shape snake_case_ = patches_shape[1] snake_case_ = patches_shape[2] snake_case_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case_ = patches.reshape([rows * columns, depth]) # [rows * columns, 1] snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1]) snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case_ = row_ids.to(torch.floataa) snake_case_ = col_ids.to(torch.floataa) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float() snake_case_ = to_numpy_array(lowerCAmelCase__) return result def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray: if image.dtype == np.uinta: snake_case_ = image.astype(np.floataa) # take mean across the whole `image` snake_case_ = np.mean(lowerCAmelCase__) snake_case_ = np.std(lowerCAmelCase__) snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape))) return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput: snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = patch_size if patch_size is not None else self.patch_size snake_case_ = max_patches if max_patches is not None else self.max_patches snake_case_ = self.is_vqa if kwargs.get('data_format', lowerCAmelCase__) is not None: raise ValueError('data_format is not an accepted input as the outputs are ') snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images] # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.') snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__) snake_case_ = kwargs.pop('font_path', lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [header_text] * len(lowerCAmelCase__) snake_case_ = [ render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__) for i, image in enumerate(lowerCAmelCase__) ] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images] # convert to torch tensor and permute snake_case_ = [ self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__) for image in images ] # create attention mask in numpy snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images] snake_case_ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__) return encoded_outputs
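The patch extraction above leans entirely on `torch.nn.functional.unfold`. A minimal, self-contained sketch of that building block, with toy shapes chosen here purely for illustration:

import torch

# A 4x4 RGB image split into non-overlapping 2x2 patches.
image = torch.arange(1 * 3 * 4 * 4, dtype=torch.float32).reshape(1, 3, 4, 4)
patches = torch.nn.functional.unfold(image, (2, 2), stride=(2, 2))
print(patches.shape)  # torch.Size([1, 12, 4]): channels * 2 * 2 = 12 values for each of the 4 patches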
312
1
"""simple docstring""" import math def UpperCAmelCase ( UpperCAmelCase = 100 ) -> int: snake_case_ = sum(i * i for i in range(1 , n + 1 ) ) snake_case_ = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) ) return square_of_sum - sum_of_squares if __name__ == "__main__": print(F"""{solution() = }""")
312
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
312
1
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {} __UpperCamelCase = {} __UpperCamelCase = {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]: snake_case_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' ) snake_case_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' ) snake_case_ = format_type def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]: snake_case_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): snake_case_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: __UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: __UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: __UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]: if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter: snake_case_ = get_format_type_from_alias(UpperCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
312
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "trajectory_transformer" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]: snake_case_ = vocab_size snake_case_ = action_weight snake_case_ = reward_weight snake_case_ = value_weight snake_case_ = max_position_embeddings snake_case_ = block_size snake_case_ = action_dim snake_case_ = observation_dim snake_case_ = transition_dim snake_case_ = learning_rate snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_embd snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = resid_pdrop snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = kaiming_initializer_range snake_case_ = use_cache super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
312
1
"""simple docstring""" import functools def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: # Validation if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(UpperCAmelCase ) == 0: return 0 if min(UpperCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(UpperCAmelCase ) >= 366: raise ValueError('All days elements should be less than 366' ) snake_case_ = set(UpperCAmelCase ) @functools.cache def dynamic_programming(UpperCAmelCase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
312
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["keras_nlp"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(self, ['keras_nlp'])
312
1
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = os.path.dirname(UpperCAmelCase ) snake_case_ = os.path.basename(UpperCAmelCase ) snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCAmelCase ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
312
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = os.path.dirname(UpperCAmelCase ) snake_case_ = os.path.basename(UpperCAmelCase ) snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCAmelCase ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
312
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = { '''configuration_xmod''': [ '''XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XmodConfig''', '''XmodOnnxConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''XMOD_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XmodForCausalLM''', '''XmodForMaskedLM''', '''XmodForMultipleChoice''', '''XmodForQuestionAnswering''', '''XmodForSequenceClassification''', '''XmodForTokenClassification''', '''XmodModel''', '''XmodPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xmod import XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP, XmodConfig, XmodOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xmod import ( XMOD_PRETRAINED_MODEL_ARCHIVE_LIST, XmodForCausalLM, XmodForMaskedLM, XmodForMultipleChoice, XmodForQuestionAnswering, XmodForSequenceClassification, XmodForTokenClassification, XmodModel, XmodPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __UpperCamelCase = '''▁''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = BarthezTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
312
1
"""simple docstring""" from __future__ import annotations import numpy as np from numpy import floataa from numpy.typing import NDArray def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> list[float]: snake_case_ , snake_case_ = coefficient_matrix.shape snake_case_ , snake_case_ = constant_matrix.shape if rowsa != colsa: snake_case_ = f'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}' raise ValueError(UpperCAmelCase ) if colsa != 1: snake_case_ = f'Constant matrix must be nx1 but received {rowsa}x{colsa}' raise ValueError(UpperCAmelCase ) if rowsa != rowsa: snake_case_ = ( 'Coefficient and constant matrices dimensions must be nxn and nx1 but ' f'received {rowsa}x{colsa} and {rowsa}x{colsa}' ) raise ValueError(UpperCAmelCase ) if len(UpperCAmelCase ) != rowsa: snake_case_ = ( 'Number of initial values must be equal to number of rows in coefficient ' f'matrix but received {len(UpperCAmelCase )} and {rowsa}' ) raise ValueError(UpperCAmelCase ) if iterations <= 0: raise ValueError('Iterations must be at least 1' ) snake_case_ = np.concatenate( (coefficient_matrix, constant_matrix) , axis=1 ) snake_case_ , snake_case_ = table.shape strictly_diagonally_dominant(UpperCAmelCase ) # Iterates the whole matrix for given number of times for _ in range(UpperCAmelCase ): snake_case_ = [] for row in range(UpperCAmelCase ): snake_case_ = 0 for col in range(UpperCAmelCase ): if col == row: snake_case_ = table[row][col] elif col == cols - 1: snake_case_ = table[row][col] else: temp += (-1) * table[row][col] * init_val[col] snake_case_ = (temp + val) / denom new_val.append(UpperCAmelCase ) snake_case_ = new_val return [float(UpperCAmelCase ) for i in new_val] def UpperCAmelCase ( UpperCAmelCase ) -> bool: snake_case_ , snake_case_ = table.shape snake_case_ = True for i in range(0 , UpperCAmelCase ): snake_case_ = 0 for j in range(0 , cols - 1 ): if i == j: continue else: total += table[i][j] if table[i][i] <= total: raise ValueError('Coefficient matrix is not strictly diagonally dominant' ) return is_diagonally_dominant # Test Cases if __name__ == "__main__": import doctest doctest.testmod()
312
"""simple docstring""" import functools def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: # Validation if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(UpperCAmelCase ) == 0: return 0 if min(UpperCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(UpperCAmelCase ) >= 366: raise ValueError('All days elements should be less than 366' ) snake_case_ = set(UpperCAmelCase ) @functools.cache def dynamic_programming(UpperCAmelCase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
312
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "gpt_neox_japanese" def __init__( self, lowerCAmelCase__=3_2000, lowerCAmelCase__=2560, lowerCAmelCase__=32, lowerCAmelCase__=32, lowerCAmelCase__=4, lowerCAmelCase__="gelu", lowerCAmelCase__=1.00, lowerCAmelCase__=1_0000, lowerCAmelCase__=2048, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__=True, lowerCAmelCase__=3_1996, lowerCAmelCase__=3_1999, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0, **lowerCAmelCase__, ) -> int: super().__init__(bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = vocab_size snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_multiple_size snake_case_ = hidden_act snake_case_ = rotary_pct snake_case_ = rotary_emb_base snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = use_cache snake_case_ = attention_dropout snake_case_ = hidden_dropout
312
"""simple docstring""" import copy import re class UpperCamelCase : SCREAMING_SNAKE_CASE_ = "hp" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = None @classmethod def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = prefix snake_case_ = defaults cls.build_naming_info() @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: if len(lowerCAmelCase__) == 0: return "" snake_case_ = None if any(char.isdigit() for char in word): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1, len(lowerCAmelCase__) + 1): snake_case_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: snake_case_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCAmelCase__): snake_case_ = '' while integer != 0: snake_case_ = chr(ord('A') + integer % 10) + s integer //= 10 return s snake_case_ = 0 while True: snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__) if sword in info["reverse_short_word"]: continue else: snake_case_ = sword break snake_case_ = short_word snake_case_ = word return short_word @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = param_name.split('_') snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name snake_case_ = ['', '_'] for separator in separators: snake_case_ = separator.join(lowerCAmelCase__) if shortname not in info["reverse_short_param"]: snake_case_ = shortname snake_case_ = param_name return shortname return param_name @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = short_name snake_case_ = param_name @classmethod def a_ ( cls) -> List[str]: if cls.NAMING_INFO is not None: return snake_case_ = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } snake_case_ = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = info @classmethod def a_ ( cls, lowerCAmelCase__) -> List[Any]: cls.build_naming_info() assert cls.PREFIX is not None snake_case_ = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue snake_case_ = cls.NAMING_INFO['short_param'][k] if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = 1 if v else 0 snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-' snake_case_ = f'{key}{sep}{v}' name.append(lowerCAmelCase__) return "_".join(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__) -> Optional[Any]: snake_case_ = repr[len(cls.PREFIX) + 1 :] if repr == "": snake_case_ = [] else: snake_case_ = repr.split('_') snake_case_ = {} for value in values: if "-" in value: snake_case_ , snake_case_ = value.split('-') else: snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__) snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__)) snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k] snake_case_ = p_v for k in cls.DEFAULTS: if k not in parameters: snake_case_ = cls.DEFAULTS[k] return parameters
312
1
"""simple docstring""" import unittest from diffusers import FlaxAutoencoderKL from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax from .test_modeling_common_flax import FlaxModelTesterMixin if is_flax_available(): import jax @require_flax class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = FlaxAutoencoderKL @property def a_ ( self) -> Tuple: snake_case_ = 4 snake_case_ = 3 snake_case_ = (32, 32) snake_case_ = jax.random.PRNGKey(0) snake_case_ = jax.random.uniform(lowerCAmelCase__, ((batch_size, num_channels) + sizes)) return {"sample": image, "prng_key": prng_key} def a_ ( self) -> Optional[int]: snake_case_ = { 'block_out_channels': [32, 64], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 4, } snake_case_ = self.dummy_input return init_dict, inputs_dict
312
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = str(UpperCAmelCase ) dataset_info.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfo.from_directory(UpperCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) ) def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCAmelCase ) snake_case_ = yaml.safe_load(UpperCAmelCase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase ( ) -> Optional[Any]: snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = str(UpperCAmelCase ) dataset_infos_dict.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in 
dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
312
1
"""simple docstring""" from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["pixel_values"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = IMAGENET_DEFAULT_MEAN, lowerCAmelCase__ = IMAGENET_DEFAULT_STD, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = size if size is not None else {'shortest_edge': 224} snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__) snake_case_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size') snake_case_ = do_resize snake_case_ = size snake_case_ = resample snake_case_ = do_center_crop snake_case_ = crop_size snake_case_ = do_rescale snake_case_ = rescale_factor snake_case_ = do_normalize snake_case_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN snake_case_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: snake_case_ = int((256 / 224) * size['shortest_edge']) snake_case_ = get_resize_output_image_size(lowerCAmelCase__, size=lowerCAmelCase__, default_to_square=lowerCAmelCase__) snake_case_ = {'height': output_size[0], 'width': output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}') return resize( lowerCAmelCase__, size=(size_dict['height'], size_dict['width']), resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: snake_case_ = get_size_dict(lowerCAmelCase__) if "height" not in size or "width" not in size: raise ValueError(f'Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}') return center_crop(lowerCAmelCase__, size=(size['height'], size['width']), data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> BatchFeature: snake_case_ = do_resize if do_resize is not None else self.do_resize snake_case_ = resample if resample is not None else self.resample snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ = do_rescale if do_rescale is not None else self.do_rescale snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = image_mean if image_mean is not None else self.image_mean snake_case_ = image_std if image_std is not None else self.image_std snake_case_ = size if size is not None else self.size snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__) snake_case_ = crop_size if crop_size is not None else self.crop_size snake_case_ = get_size_dict(lowerCAmelCase__, param_name='crop_size') snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if do_resize: snake_case_ = [self.resize(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) for image in images] if do_center_crop: snake_case_ = [self.center_crop(lowerCAmelCase__, lowerCAmelCase__) for image in images] if do_rescale: snake_case_ = [self.rescale(lowerCAmelCase__, lowerCAmelCase__) for image in images] if do_normalize: snake_case_ = [self.normalize(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) for image in images] snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images] snake_case_ = {'pixel_values': images} return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
312
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE_ = frozenset([] ) def a_ ( self) -> Any: torch.manual_seed(0) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, ) snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__) torch.manual_seed(0) snake_case_ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0) snake_case_ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, ) snake_case_ = CLIPTextModel(lowerCAmelCase__) snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') snake_case_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) snake_case_ = image.cpu().permute(0, 2, 3, 1)[0] snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64)) snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64)) if str(lowerCAmelCase__).startswith('mps'): snake_case_ = torch.manual_seed(lowerCAmelCase__) else: snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a_ ( self) -> Dict: snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = 
StableDiffusionInpaintPipeline(**lowerCAmelCase__) snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs(lowerCAmelCase__) snake_case_ = sd_pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self) -> Union[str, Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def a_ ( self) -> Optional[int]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def a_ ( self) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = 
'stabilityai/stable-diffusion-2-inpainting' snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler') snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', ) snake_case_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
312
1
"""simple docstring""" import cva import numpy as np class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict: if k in (0.04, 0.06): snake_case_ = k snake_case_ = window_size else: raise ValueError('invalid k value') def __str__( self) -> str: return str(self.k) def a_ ( self, lowerCAmelCase__) -> tuple[cva.Mat, list[list[int]]]: snake_case_ = cva.imread(lowerCAmelCase__, 0) snake_case_ , snake_case_ = img.shape snake_case_ = [] snake_case_ = img.copy() snake_case_ = cva.cvtColor(lowerCAmelCase__, cva.COLOR_GRAY2RGB) snake_case_ , snake_case_ = np.gradient(lowerCAmelCase__) snake_case_ = dx**2 snake_case_ = dy**2 snake_case_ = dx * dy snake_case_ = 0.04 snake_case_ = self.window_size // 2 for y in range(lowerCAmelCase__, h - offset): for x in range(lowerCAmelCase__, w - offset): snake_case_ = ixx[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() snake_case_ = iyy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() snake_case_ = ixy[ y - offset : y + offset + 1, x - offset : x + offset + 1 ].sum() snake_case_ = (wxx * wyy) - (wxy**2) snake_case_ = wxx + wyy snake_case_ = det - k * (trace**2) # Can change the value if r > 0.5: corner_list.append([x, y, r]) color_img.itemset((y, x, 0), 0) color_img.itemset((y, x, 1), 0) color_img.itemset((y, x, 2), 255) return color_img, corner_list if __name__ == "__main__": __UpperCamelCase = HarrisCorner(0.04, 3) __UpperCamelCase , __UpperCamelCase = edge_detect.detect('''path_to_image''') cva.imwrite('''detect.png''', color_img)
312
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: return (preds == labels).mean() @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} ) SCREAMING_SNAKE_CASE_ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Set seed set_seed(training_args.seed ) try: snake_case_ = processors[data_args.task_name]() snake_case_ = processor.get_labels() snake_case_ = len(UpperCAmelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase ) -> Dict: snake_case_ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )} # Data collator snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case_ = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ = trainer.evaluate() snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase , 'w' ) as 
writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(UpperCAmelCase ) return results def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
312
1
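The metric wiring in that script reduces to an argmax over per-choice scores plus an element-wise comparison; a self-contained sketch of the same computation, with invented arrays for illustration:

# Minimal sketch of the accuracy metric above (arrays are made up).
import numpy as np

predictions = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # per-choice scores
label_ids = np.array([1, 0, 0])

preds = np.argmax(predictions, axis=1)   # pick the highest-scoring choice per example
accuracy = (preds == label_ids).mean()
print(accuracy)                          # 0.666...: two of three examples match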
"""simple docstring""" import argparse import intel_extension_for_pytorch as ipex import torch from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline __UpperCamelCase = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False) parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''') parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = '''cpu''' __UpperCamelCase = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings''' __UpperCamelCase = '''path-to-your-trained-model''' __UpperCamelCase = StableDiffusionPipeline.from_pretrained(model_id) if args.dpm: __UpperCamelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) __UpperCamelCase = pipe.to(device) # to channels last __UpperCamelCase = pipe.unet.to(memory_format=torch.channels_last) __UpperCamelCase = pipe.vae.to(memory_format=torch.channels_last) __UpperCamelCase = pipe.text_encoder.to(memory_format=torch.channels_last) if pipe.requires_safety_checker: __UpperCamelCase = pipe.safety_checker.to(memory_format=torch.channels_last) # optimize with ipex __UpperCamelCase = torch.randn(2, 4, 64, 64) __UpperCamelCase = torch.rand(1) * 999 __UpperCamelCase = torch.randn(2, 77, 768) __UpperCamelCase = (sample, timestep, encoder_hidden_status) try: __UpperCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example) except Exception: __UpperCamelCase = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True) __UpperCamelCase = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True) __UpperCamelCase = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True) if pipe.requires_safety_checker: __UpperCamelCase = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True) # compute __UpperCamelCase = 666 __UpperCamelCase = torch.Generator(device).manual_seed(seed) __UpperCamelCase = {'''generator''': generator} if args.steps is not None: __UpperCamelCase = args.steps with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa): __UpperCamelCase = pipe(prompt, **generate_kwargs).images[0] # save image image.save('''generated.png''')
312
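For context on the channels-last conversions in that script: they change only the memory layout (the strides), not the logical shape or values of a tensor. A quick check:

# Sketch: channels_last changes strides, not shape (illustrative only).
import torch

x = torch.randn(1, 3, 64, 64)                # NCHW, contiguous
y = x.to(memory_format=torch.channels_last)  # NHWC layout under the hood

print(x.shape == y.shape)    # True: logical shape unchanged
print(x.stride())            # (12288, 4096, 64, 1)
print(y.stride())            # (12288, 1, 192, 3)
print(y.is_contiguous(memory_format=torch.channels_last))  # True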
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
312
1
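For intuition, a worked call of the minimax above on a four-leaf tree (leaf values invented for illustration):

# Worked example (height = log2(4) = 2), maximizer moves first.
# Leaves: [3, 5, 2, 9]
#   depth 1 (minimizer): min(3, 5) = 3 and min(2, 9) = 2
#   depth 0 (maximizer): max(3, 2) = 3
scores = [3, 5, 2, 9]
print(minimax(0, 0, True, scores, 2))  # 3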
"""simple docstring""" from pathlib import Path import fire from tqdm import tqdm def UpperCAmelCase ( UpperCAmelCase="ro" , UpperCAmelCase="en" , UpperCAmelCase="wmt16" , UpperCAmelCase=None ) -> None: try: import datasets except (ModuleNotFoundError, ImportError): raise ImportError('run pip install datasets' ) snake_case_ = f'{src_lang}-{tgt_lang}' print(f'Converting {dataset}-{pair}' ) snake_case_ = datasets.load_dataset(UpperCAmelCase , UpperCAmelCase ) if save_dir is None: snake_case_ = f'{dataset}-{pair}' snake_case_ = Path(UpperCAmelCase ) save_dir.mkdir(exist_ok=UpperCAmelCase ) for split in ds.keys(): print(f'Splitting {split} with {ds[split].num_rows} records' ) # to save to val.source, val.target like summary datasets snake_case_ = 'val' if split == 'validation' else split snake_case_ = save_dir.joinpath(f'{fn}.source' ) snake_case_ = save_dir.joinpath(f'{fn}.target' ) snake_case_ = src_path.open('w+' ) snake_case_ = tgt_path.open('w+' ) # reader is the bottleneck so writing one record at a time doesn't slow things down for x in tqdm(ds[split] ): snake_case_ = x['translation'] src_fp.write(ex[src_lang] + '\n' ) tgt_fp.write(ex[tgt_lang] + '\n' ) print(f'Saved {dataset} dataset to {save_dir}' ) if __name__ == "__main__": fire.Fire(download_wmt_dataset)
312
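Because the function is exposed through fire.Fire, shell flags map one-to-one onto its keyword arguments. An equivalent programmatic call (script name and save path assumed for illustration):

# Equivalent to: python download_wmt.py --src_lang ro --tgt_lang en --dataset wmt16 --save_dir wmt16-ro-en
download_wmt_dataset(src_lang="ro", tgt_lang="en", dataset="wmt16", save_dir="wmt16-ro-en")
# writes train.source/train.target, val.source/val.target, test.source/test.target under wmt16-ro-en/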
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict: snake_case_ = [] for old_item in old_list: snake_case_ = old_item.replace('in_layers.0' , 'norm1' ) snake_case_ = new_item.replace('in_layers.2' , 'conv1' ) snake_case_ = new_item.replace('out_layers.0' , 'norm2' ) snake_case_ = new_item.replace('out_layers.3' , 'conv2' ) snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' ) snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]: snake_case_ = [] for old_item in old_list: snake_case_ = old_item snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' ) snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' ) snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]: assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): snake_case_ = old_checkpoint[path] snake_case_ = old_tensor.shape[0] // 3 snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3 snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 ) snake_case_ = query.reshape(UpperCAmelCase ) snake_case_ = key.reshape(UpperCAmelCase ) snake_case_ = value.reshape(UpperCAmelCase ) for path in paths: snake_case_ = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: snake_case_ = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: snake_case_ = old_checkpoint[path['old']][:, :, 0] else: snake_case_ = old_checkpoint[path['old']] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = {} snake_case_ = checkpoint['time_embed.0.weight'] snake_case_ = checkpoint['time_embed.0.bias'] snake_case_ = checkpoint['time_embed.2.weight'] snake_case_ = checkpoint['time_embed.2.bias'] snake_case_ = checkpoint['input_blocks.0.0.weight'] snake_case_ = checkpoint['input_blocks.0.0.bias'] snake_case_ = checkpoint['out.0.weight'] snake_case_ = checkpoint['out.0.bias'] snake_case_ = checkpoint['out.2.weight'] snake_case_ = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the middle blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the output blocks only snake_case_ = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } for i in range(1 , UpperCAmelCase ): snake_case_ = (i - 1) // (config['num_res_blocks'] + 1) snake_case_ = (i - 1) % (config['num_res_blocks'] + 1) snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.weight' ] snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase ) if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , ) snake_case_ = middle_blocks[0] snake_case_ = middle_blocks[1] snake_case_ = middle_blocks[2] snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase ) for i in range(UpperCAmelCase ): snake_case_ = i // (config['num_res_blocks'] + 1) snake_case_ = i % (config['num_res_blocks'] + 1) snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]] snake_case_ = {} for layer in output_block_layers: snake_case_ , snake_case_ = layer.split('.' 
)[0], shave_segments(UpperCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCAmelCase ) else: snake_case_ = [layer_name] if len(UpperCAmelCase ) > 1: snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(UpperCAmelCase ) == 2: snake_case_ = [] if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , ) else: snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] ) snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] ) snake_case_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: __UpperCamelCase = json.loads(f.read()) __UpperCamelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __UpperCamelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: 
E722 model.save_pretrained(args.dump_path)
312
1
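The least obvious step in the LDM checkpoint conversion above is splitting the original fused qkv projection into per-head query/key/value slices before reshaping them back to per-projection weights. A toy sketch of that reshape-and-split, with invented sizes rather than the real checkpoint:

# Toy sketch of the fused-qkv split performed above (sizes are made up).
import torch

channels, num_heads = 12, 2                 # hidden size and head count
qkv = torch.randn(3 * channels, channels)   # fused projection weight, shape (36, 12)

# group rows by head, then split each head's rows into q, k, v
grouped = qkv.reshape(num_heads, 3 * channels // num_heads, channels)   # (2, 18, 12)
query, key, value = grouped.split(channels // num_heads, dim=1)         # 3 x (2, 6, 12)

print(query.reshape(-1, channels).shape)    # torch.Size([12, 12]): a standalone projection weight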
"""simple docstring""" from __future__ import annotations import numpy as np def UpperCAmelCase ( UpperCAmelCase ) -> str: return np.maximum(0 , UpperCAmelCase ) if __name__ == "__main__": print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
312
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def UpperCAmelCase ( UpperCAmelCase ) -> Dict: # vision encoder if "img_encoder.pos_embed" in name: snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: snake_case_ = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: snake_case_ = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: snake_case_ = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: snake_case_ = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: snake_case_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: snake_case_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: snake_case_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: snake_case_ = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: snake_case_ = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: snake_case_ = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(UpperCAmelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' 
) snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] ) snake_case_ = config.vision_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ = int(key_split[3] ) snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = rename_key(UpperCAmelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ = val.squeeze_() else: snake_case_ = val return orig_state_dict def UpperCAmelCase ( ) -> Any: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int: snake_case_ = GroupViTConfig() snake_case_ = GroupViTModel(UpperCAmelCase ).eval() snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model'] snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0) # verify result snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) snake_case_ = prepare_img() snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase ) if model_name == "groupvit-gcc-yfcc": snake_case_ = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(f'Model name {model_name} not supported.' ) assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 ) processor.save_pretrained(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) print('Successfully saved processor and model to' , UpperCAmelCase ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(UpperCAmelCase , organization='nielsr' ) model.push_to_hub(UpperCAmelCase , organization='nielsr' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gccy-fcc''', type=str, help='''Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) __UpperCamelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
312
1
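Most of the GroupViT conversion above is mechanical key renaming over the state dict. A minimal sketch of the same pattern on a toy dict (keys and shapes invented):

# Toy sketch of the rename-keys pattern used above (entries are invented).
import torch

old_state_dict = {"img_encoder.patch_embed.proj.weight": torch.zeros(4, 4)}
rules = [("img_encoder.patch_embed.proj", "vision_model.embeddings.patch_embeddings.projection")]

new_state_dict = {}
for name, tensor in old_state_dict.items():
    for old, new in rules:
        name = name.replace(old, new)
    new_state_dict[name] = tensor

print(list(new_state_dict))  # ['vision_model.embeddings.patch_embeddings.projection.weight']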
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if not scores: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] return ( max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) if is_max else min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print(f'Optimal value : {minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase )}' ) if __name__ == "__main__": import doctest doctest.testmod() main()
312
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
1
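The `_LazyModule` machinery defers the torch-dependent imports until an attribute is first accessed. Transformers' helper is private, so as a generic approximation, the same idea can be sketched with only PEP 562's module-level `__getattr__`:

# Generic sketch of lazy attribute-driven imports (PEP 562); an approximation
# of what _LazyModule does, not its actual implementation.
import importlib

_import_structure = {"configuration_mmbt": ["MMBTConfig"]}


def __getattr__(name):
    for submodule, exported in _import_structure.items():
        if name in exported:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")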
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=[30, 30], lowerCAmelCase__=2, lowerCAmelCase__=3, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=32, lowerCAmelCase__=5, lowerCAmelCase__=4, lowerCAmelCase__=37, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=10, lowerCAmelCase__=0.02, lowerCAmelCase__=3, lowerCAmelCase__=None, lowerCAmelCase__=8, lowerCAmelCase__=10, ) -> Optional[Any]: snake_case_ = parent snake_case_ = batch_size snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = is_training snake_case_ = use_labels snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = scope snake_case_ = n_targets snake_case_ = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens snake_case_ = (image_size[1] // patch_size) * (image_size[0] // patch_size) snake_case_ = num_patches + 1 + self.num_detection_tokens def a_ ( self) -> Dict: snake_case_ = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]]) snake_case_ = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) snake_case_ = [] for i in range(self.batch_size): snake_case_ = {} snake_case_ = torch.randint( high=self.num_labels, size=(self.n_targets,), device=lowerCAmelCase__) snake_case_ = torch.rand(self.n_targets, 4, device=lowerCAmelCase__) labels.append(lowerCAmelCase__) snake_case_ = self.get_config() return config, pixel_values, labels def a_ ( self) -> Any: return YolosConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=lowerCAmelCase__, initializer_range=self.initializer_range, num_detection_tokens=self.num_detection_tokens, num_labels=self.num_labels, ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = YolosModel(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__) self.parent.assertEqual( result.last_hidden_state.shape, 
(self.batch_size, self.expected_seq_len, self.hidden_size)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: snake_case_ = YolosForObjectDetection(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(pixel_values=lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4)) snake_case_ = model(pixel_values=lowerCAmelCase__, labels=lowerCAmelCase__) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_detection_tokens, self.num_labels + 1)) self.parent.assertEqual(result.pred_boxes.shape, (self.batch_size, self.num_detection_tokens, 4)) def a_ ( self) -> List[str]: snake_case_ = self.prepare_config_and_inputs() snake_case_ , snake_case_ , snake_case_ = config_and_inputs snake_case_ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = (YolosModel, YolosForObjectDetection) if is_torch_available() else () SCREAMING_SNAKE_CASE_ = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=False) -> Tuple: snake_case_ = super()._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__, return_labels=lowerCAmelCase__) if return_labels: if model_class.__name__ == "YolosForObjectDetection": snake_case_ = [] for i in range(self.model_tester.batch_size): snake_case_ = {} snake_case_ = torch.ones( size=(self.model_tester.n_targets,), device=lowerCAmelCase__, dtype=torch.long) snake_case_ = torch.ones( self.model_tester.n_targets, 4, device=lowerCAmelCase__, dtype=torch.float) labels.append(lowerCAmelCase__) snake_case_ = labels return inputs_dict def a_ ( self) -> str: snake_case_ = YolosModelTester(self) snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, has_text_modality=lowerCAmelCase__, hidden_size=37) def a_ ( self) -> Optional[Any]: self.config_tester.run_common_tests() def a_ ( self) -> int: # YOLOS does not use inputs_embeds pass def a_ ( self) -> Optional[int]: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) snake_case_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase__, nn.Linear)) def a_ ( self) -> Any: snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = model_class(lowerCAmelCase__) snake_case_ = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic snake_case_ = [*signature.parameters.keys()] snake_case_ = ['pixel_values'] self.assertListEqual(arg_names[:1], lowerCAmelCase__) def a_ ( self) -> Any: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase__) def a_ ( self) -> Optional[int]: snake_case_ , snake_case_ = 
self.model_tester.prepare_config_and_inputs_for_common() snake_case_ = True # in YOLOS, the seq_len is different snake_case_ = self.model_tester.expected_seq_len for model_class in self.all_model_classes: snake_case_ = True snake_case_ = False snake_case_ = True snake_case_ = model_class(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__)) snake_case_ = outputs.attentions self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] snake_case_ = True snake_case_ = model_class(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__)) snake_case_ = outputs.attentions self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) snake_case_ = len(lowerCAmelCase__) # Check attention is always last and order is fine snake_case_ = True snake_case_ = True snake_case_ = model_class(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__)) snake_case_ = 1 self.assertEqual(out_len + added_hidden_states, len(lowerCAmelCase__)) snake_case_ = outputs.attentions self.assertEqual(len(lowerCAmelCase__), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, seq_len, seq_len], ) def a_ ( self) -> Dict: def check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__): snake_case_ = model_class(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() with torch.no_grad(): snake_case_ = model(**self._prepare_for_class(lowerCAmelCase__, lowerCAmelCase__)) snake_case_ = outputs.hidden_states snake_case_ = getattr( self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1) self.assertEqual(len(lowerCAmelCase__), lowerCAmelCase__) # YOLOS has a different seq_length snake_case_ = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: snake_case_ = True check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] snake_case_ = True check_hidden_states_output(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Optional[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*lowerCAmelCase__) @slow def a_ ( self) -> Dict: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = YolosModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) def UpperCAmelCase ( ) -> List[str]: snake_case_ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def a_ ( self) -> Union[str, Any]: return AutoImageProcessor.from_pretrained('hustvl/yolos-small') if is_vision_available() else None @slow 
def a_ ( self) -> Any: snake_case_ = YolosForObjectDetection.from_pretrained('hustvl/yolos-small').to(lowerCAmelCase__) snake_case_ = self.default_image_processor snake_case_ = prepare_img() snake_case_ = image_processor(images=lowerCAmelCase__, return_tensors='pt').to(lowerCAmelCase__) # forward pass with torch.no_grad(): snake_case_ = model(inputs.pixel_values) # verify outputs snake_case_ = torch.Size((1, 100, 92)) self.assertEqual(outputs.logits.shape, lowerCAmelCase__) snake_case_ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]], device=lowerCAmelCase__, ) snake_case_ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]], device=lowerCAmelCase__) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], lowerCAmelCase__, atol=1e-4)) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3], lowerCAmelCase__, atol=1e-4)) # verify postprocessing snake_case_ = image_processor.post_process_object_detection( lowerCAmelCase__, threshold=0.3, target_sizes=[image.size[::-1]])[0] snake_case_ = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861]).to(lowerCAmelCase__) snake_case_ = [75, 75, 17, 63, 17] snake_case_ = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495]).to(lowerCAmelCase__) self.assertEqual(len(results['scores']), 5) self.assertTrue(torch.allclose(results['scores'], lowerCAmelCase__, atol=1e-4)) self.assertSequenceEqual(results['labels'].tolist(), lowerCAmelCase__) self.assertTrue(torch.allclose(results['boxes'][0, :], lowerCAmelCase__))
312
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]: if partitions <= 0: raise ValueError('partitions must be a positive number!' ) if partitions > number_of_bytes: raise ValueError('partitions can not > number_of_bytes!' ) snake_case_ = number_of_bytes // partitions snake_case_ = [] for i in range(UpperCAmelCase ): snake_case_ = i * bytes_per_partition + 1 snake_case_ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
312
1
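A worked check of the allocator above: 100 bytes over 3 partitions gives 33-byte ranges, with the remainder folded into the last one:

print(allocation_num(100, 3))
# ['1-33', '34-66', '67-100']  (100 // 3 = 33; the last partition absorbs the extra byte)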
"""simple docstring""" import copy from dataclasses import dataclass from pathlib import Path from typing import Dict, Optional, Union @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = 1 SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None def a_ ( self) -> "DownloadConfig": return self.__class__(**{k: copy.deepcopy(lowerCAmelCase__) for k, v in self.__dict__.items()})
312
"""simple docstring""" __UpperCamelCase = 256 # Modulus to hash a string __UpperCamelCase = 100_0003 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool: snake_case_ = len(UpperCAmelCase ) snake_case_ = len(UpperCAmelCase ) if p_len > t_len: return False snake_case_ = 0 snake_case_ = 0 snake_case_ = 1 # Calculating the hash of pattern and substring of text for i in range(UpperCAmelCase ): snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue snake_case_ = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash snake_case_ = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: snake_case_ = 'abc1abc12' snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc' snake_case_ = 'alskfjaldsk23adsfabcabc' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 2) snake_case_ = 'ABABX' snake_case_ = 'ABABZABABYABABX' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 3) snake_case_ = 'AAAB' snake_case_ = 'ABAAAAAB' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 4) snake_case_ = 'abcdabcy' snake_case_ = 'abcxabcdabxabcdabcdabcy' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 5) snake_case_ = 'Lü' snake_case_ = 'Lüsai' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'Lue' assert not rabin_karp(UpperCAmelCase , UpperCAmelCase ) print('Success.' ) if __name__ == "__main__": test_rabin_karp()
312
1
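The rolling-hash update in the loop above can be verified by hand on a two-character window, using the same base and modulus:

# Hand-check of the rolling hash: sliding "ab" -> "bc" inside "abc".
base, mod = 256, 1_000_003

h_ab = (ord("a") * base + ord("b")) % mod   # hash of the window "ab"
power = base % mod                          # base**(window_len - 1) % mod

# drop 'a', shift left by one symbol, append 'c' -- the same update rule as rabin_karp
h_bc = ((h_ab - ord("a") * power) * base + ord("c")) % mod
assert h_bc == (ord("b") * base + ord("c")) % mod
print("rolling update verified")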
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = "x" , UpperCAmelCase = 10**-10 , UpperCAmelCase = 1 , ) -> complex: snake_case_ = symbols(UpperCAmelCase ) snake_case_ = lambdify(UpperCAmelCase , UpperCAmelCase ) snake_case_ = lambdify(UpperCAmelCase , diff(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = starting_point while True: if diff_function(UpperCAmelCase ) != 0: snake_case_ = prev_guess - multiplicity * func(UpperCAmelCase ) / diff_function( UpperCAmelCase ) else: raise ZeroDivisionError('Could not find root' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess snake_case_ = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"""The root of sin(x) = 0 is {newton_raphson("sin(x)", 2)}""") # Find root of polynomial # Find fourth Root of 5 print(F"""The root of x**4 - 5 = 0 is {newton_raphson("x**4 -5", 0.4 +5j)}""") # Find value of e print( '''The root of log(y) - 1 = 0 is ''', F"""{newton_raphson("log(y) - 1", 2, variable="y")}""", ) # Exponential Roots print( '''The root of exp(x) - 1 = 0 is''', F"""{newton_raphson("exp(x) - 1", 10, precision=0.005)}""", ) # Find root of cos(x) print(F"""The root of cos(x) = 0 is {newton_raphson("cos(x)", 0)}""")
312
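The update rule is the classic x_{n+1} = x_n - f(x_n)/f'(x_n); one hand-computed step for f(x) = x**2 - 2 shows how quickly it closes in on sqrt(2):

# One Newton step by hand for f(x) = x**2 - 2, starting at x0 = 1.5:
#   x1 = x0 - f(x0)/f'(x0) = 1.5 - 0.25/3 = 1.41666...
x0 = 1.5
x1 = x0 - (x0**2 - 2) / (2 * x0)
print(x1)  # 1.4166666666666667, already close to sqrt(2) ~= 1.41421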
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "resnet" SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"] def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict: super().__init__(**lowerCAmelCase__) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}') snake_case_ = num_channels snake_case_ = embedding_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = layer_type snake_case_ = hidden_act snake_case_ = downsample_in_first_stage snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-3
312
1
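A quick sanity check on the config above, via the public class transformers exposes (the default depths give a ResNet-50-style layout with four stages):

# Sketch: instantiate the config and inspect the derived stage names.
from transformers import ResNetConfig

config = ResNetConfig(depths=[3, 4, 6, 3], layer_type="bottleneck")
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']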
"""simple docstring""" import warnings from ...utils import logging from .image_processing_imagegpt import ImageGPTImageProcessor __UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> None: warnings.warn( 'The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.' ' Please use ImageGPTImageProcessor instead.', lowerCAmelCase__, ) super().__init__(*lowerCAmelCase__, **lowerCAmelCase__)
312
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> int: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>') tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = 'this is a test' snake_case_ = 'this is a test' return input_text, output_text def a_ ( self) -> Optional[int]: snake_case_ = '<pad>' snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], '<pad>') self.assertEqual(vocab_keys[1], '<unk>') self.assertEqual(vocab_keys[-1], '[PAD]') self.assertEqual(len(lowerCAmelCase__), 3_0001) def a_ ( self) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size, 3_0000) def a_ ( self) -> List[str]: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> str: pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> List[Any]: pass def a_ ( self) -> str: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> List[Any]: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Any: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = 'This is a test' snake_case_ = [13, 1, 4398, 25, 21, 1289] snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = DebertaVaTokenizer(lowerCAmelCase__) snake_case_ = tokenizer.encode('sequence builders') snake_case_ = tokenizer.encode('multi-sequence build') snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, ) @slow def a_ ( self) -> Union[str, Any]: # fmt: off snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
312
1
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float: snake_case_ = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff) # formula for sum of series return total def UpperCAmelCase ( ) -> List[Any]: print(sum_of_series(1 , 1 , 10 ) ) if __name__ == "__main__": import doctest doctest.testmod()
312
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {} __UpperCamelCase = {} __UpperCamelCase = {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]: snake_case_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' ) snake_case_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' ) snake_case_ = format_type def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]: snake_case_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): snake_case_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: __UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: __UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: __UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]: if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter: snake_case_ = get_format_type_from_alias(UpperCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
312
1
"""simple docstring""" import os def UpperCAmelCase ( ) -> List[str]: with open(os.path.dirname(UpperCAmelCase ) + '/p022_names.txt' ) as file: snake_case_ = str(file.readlines()[0] ) snake_case_ = names.replace('"' , '' ).split(',' ) names.sort() snake_case_ = 0 snake_case_ = 0 for i, name in enumerate(UpperCAmelCase ): for letter in name: name_score += ord(UpperCAmelCase ) - 64 total_score += (i + 1) * name_score snake_case_ = 0 return total_score if __name__ == "__main__": print(solution())
312
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp __UpperCamelCase = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } __UpperCamelCase = { '''RUCAIBox/mvp''': 1024, } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = MvpTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]: super().__init__( lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type')) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**lowerCAmelCase__) snake_case_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case_ = 'post_processor' snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) if tokenizer_component_instance: snake_case_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case_ = tuple(state['sep']) if "cls" in state: snake_case_ = tuple(state['cls']) snake_case_ = False if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = add_prefix_space snake_case_ = True if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets: snake_case_ = trim_offsets snake_case_ = True if changes_to_apply: snake_case_ = getattr(lowerCAmelCase__, state.pop('type')) snake_case_ = component_class(**lowerCAmelCase__) setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) @property def a_ ( self) -> str: if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value snake_case_ = value def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str: snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
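# Usage sketch (illustrative; fetching the RUCAIBox/mvp checkpoint):
#
#     from transformers import MvpTokenizerFast
#     tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
#     ids = tok("Hello world")["input_ids"]
#     ids[0] == tok.bos_token_id and ids[-1] == tok.eos_token_id  # -> True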
312
1
"""simple docstring""" import unittest from datasets import load_dataset from transformers import BloomTokenizerFast from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = BloomTokenizerFast SCREAMING_SNAKE_CASE_ = BloomTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = "tokenizer_file" SCREAMING_SNAKE_CASE_ = {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"} def a_ ( self) -> str: super().setUp() snake_case_ = BloomTokenizerFast.from_pretrained('bigscience/tokenizer') tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, **lowerCAmelCase__) -> Any: kwargs.update(self.special_tokens_map) return BloomTokenizerFast.from_pretrained(self.tmpdirname, **lowerCAmelCase__) def a_ ( self) -> Union[str, Any]: snake_case_ = self.get_rust_tokenizer() snake_case_ = ['The quick brown fox</s>', 'jumps over the lazy dog</s>'] snake_case_ = [[2175, 2_3714, 7_3173, 14_4252, 2], [77, 13_2619, 3478, 368, 10_9586, 3_5433, 2]] snake_case_ = tokenizer.batch_encode_plus(lowerCAmelCase__)['input_ids'] self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.batch_decode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__=6) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): snake_case_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__) # tokenizer_r.pad_token = None # Hotfixing padding = None # Simple input snake_case_ = 'This is a simple input' snake_case_ = ['This is a simple input 1', 'This is a simple input 2'] snake_case_ = ('This is a simple input', 'This is a pair') snake_case_ = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests try: tokenizer_r.encode(lowerCAmelCase__, max_length=lowerCAmelCase__) tokenizer_r.encode_plus(lowerCAmelCase__, max_length=lowerCAmelCase__) tokenizer_r.batch_encode_plus(lowerCAmelCase__, max_length=lowerCAmelCase__) tokenizer_r.encode(lowerCAmelCase__, max_length=lowerCAmelCase__) tokenizer_r.batch_encode_plus(lowerCAmelCase__, max_length=lowerCAmelCase__) except ValueError: self.fail('Bloom Tokenizer should be able to deal with padding') snake_case_ = None # Hotfixing padding = None self.assertRaises(lowerCAmelCase__, tokenizer_r.encode, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length') # Simple input self.assertRaises(lowerCAmelCase__, tokenizer_r.encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length') # Simple input self.assertRaises( lowerCAmelCase__, tokenizer_r.batch_encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length', ) # Pair input self.assertRaises(lowerCAmelCase__, tokenizer_r.encode, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length') # Pair input self.assertRaises(lowerCAmelCase__, tokenizer_r.encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length') # Pair input self.assertRaises( lowerCAmelCase__, tokenizer_r.batch_encode_plus, lowerCAmelCase__, max_length=lowerCAmelCase__, padding='max_length', ) def a_ ( self) -> Dict: snake_case_ = self.get_rust_tokenizer() snake_case_ = 
load_dataset('xnli', 'all_languages', split='test', streaming=lowerCAmelCase__) snake_case_ = next(iter(lowerCAmelCase__))['premise'] # pick up one data snake_case_ = list(sample_data.values()) snake_case_ = list(map(tokenizer.encode, lowerCAmelCase__)) snake_case_ = [tokenizer.decode(lowerCAmelCase__, clean_up_tokenization_spaces=lowerCAmelCase__) for x in output_tokens] self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Optional[Any]: # The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have # any sequence length constraints. This test of the parent class will fail since it relies on the # maximum sequence length of the positoonal embeddings. self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map), 1) self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]), 1)
312
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def a_ ( self, lowerCAmelCase__=0) -> List[Any]: snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__)) snake_case_ = np.random.RandomState(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def a_ ( self) -> Optional[Any]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def a_ ( self) -> List[str]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> str: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) # warmup pass to apply optimizations snake_case_ = pipe(**self.get_dummy_inputs()) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> int: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def 
a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): @property def a_ ( self) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a_ ( self) -> str: snake_case_ = ort.SessionOptions() snake_case_ = False return options def a_ ( self) -> Any: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) # using the PNDM scheduler by default snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def a_ ( self) -> List[Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) snake_case_ = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx') snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) 
snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
312
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __UpperCamelCase = { '''configuration_mask2former''': [ '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Mask2FormerConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''Mask2FormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = [ '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Mask2FormerForUniversalSegmentation''', '''Mask2FormerModel''', '''Mask2FormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
312
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCamelCase = False __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''ybelkada/fonts''' def UpperCAmelCase ( ) -> Dict: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ' 'Pix2StructImageProcessor. Please upgrade torch.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: requires_backends(UpperCAmelCase , ['torch'] ) _check_torch_version() snake_case_ = image_tensor.unsqueeze(0 ) snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 ) snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image: requires_backends(UpperCAmelCase , 'vision' ) # Add new lines so that each line is no more than 80 characters. snake_case_ = textwrap.TextWrapper(width=80 ) snake_case_ = wrapper.wrap(text=UpperCAmelCase ) snake_case_ = '\n'.join(UpperCAmelCase ) if font_bytes is not None and font_path is None: snake_case_ = io.BytesIO(UpperCAmelCase ) elif font_path is not None: snake_case_ = font_path else: snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' ) snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase ) # Create the actual image with a bit of padding around the text. 
snake_case_ = text_width + left_padding + right_padding snake_case_ = text_height + top_padding + bottom_padding snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase ) snake_case_ = ImageDraw.Draw(UpperCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase ) return image def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(UpperCAmelCase , 'vision' ) # Convert to PIL image if necessary snake_case_ = to_pil_image(UpperCAmelCase ) snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase ) snake_case_ = max(header_image.width , image.width ) snake_case_ = int(image.height * (new_width / image.width) ) snake_case_ = int(header_image.height * (new_width / header_image.width) ) snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case_ = to_numpy_array(UpperCAmelCase ) if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST: snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) return new_image class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["flattened_patches"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16} snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = max_patches snake_case_ = is_vqa def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray: requires_backends(self.extract_flattened_patches, 'torch') _check_torch_version() # convert to torch snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST) snake_case_ = torch.from_numpy(lowerCAmelCase__) snake_case_ , snake_case_ = patch_size['height'], patch_size['width'] snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__) # maximize scale s.t. 
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1) snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1) snake_case_ = max(num_feasible_rows * patch_height, 1) snake_case_ = max(num_feasible_cols * patch_width, 1) snake_case_ = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = patches.shape snake_case_ = patches_shape[1] snake_case_ = patches_shape[2] snake_case_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case_ = patches.reshape([rows * columns, depth]) # [rows * columns, 1] snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1]) snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case_ = row_ids.to(torch.floataa) snake_case_ = col_ids.to(torch.floataa) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float() snake_case_ = to_numpy_array(lowerCAmelCase__) return result def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray: if image.dtype == np.uinta: snake_case_ = image.astype(np.floataa) # take mean across the whole `image` snake_case_ = np.mean(lowerCAmelCase__) snake_case_ = np.std(lowerCAmelCase__) snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape))) return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput: snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = patch_size if patch_size is not None else self.patch_size snake_case_ = max_patches if max_patches is not None else self.max_patches snake_case_ = self.is_vqa if kwargs.get('data_format', lowerCAmelCase__) is not None: raise ValueError('data_format is not an accepted input as the outputs are ') snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images] # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.') snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__) snake_case_ = kwargs.pop('font_path', lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [header_text] * len(lowerCAmelCase__) snake_case_ = [ render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__) for i, image in enumerate(lowerCAmelCase__) ] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images] # convert to torch tensor and permute snake_case_ = [ self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__) for image in images ] # create attention mask in numpy snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images] snake_case_ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__) return encoded_outputs
312
1
"""simple docstring""" from collections.abc import Sequence def UpperCAmelCase ( UpperCAmelCase = None ) -> int: if nums is None or not nums: raise ValueError('Input sequence should not be empty' ) snake_case_ = nums[0] for i in range(1 , len(UpperCAmelCase ) ): snake_case_ = nums[i] snake_case_ = max(UpperCAmelCase , ans + num , UpperCAmelCase ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user __UpperCamelCase = int(input('''Enter number of elements : ''').strip()) __UpperCamelCase = list(map(int, input('''\nEnter the numbers : ''').strip().split()))[:n] print(max_subsequence_sum(array))
312
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
312
1
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = DiTPipeline SCREAMING_SNAKE_CASE_ = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE_ = PipelineTesterMixin.required_optional_params - { "latents", "num_images_per_prompt", "callback", "callback_steps", } SCREAMING_SNAKE_CASE_ = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS SCREAMING_SNAKE_CASE_ = False def a_ ( self) -> Any: torch.manual_seed(0) snake_case_ = TransformeraDModel( sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=lowerCAmelCase__, activation_fn='gelu-approximate', num_embeds_ada_norm=1000, norm_type='ada_norm_zero', norm_elementwise_affine=lowerCAmelCase__, ) snake_case_ = AutoencoderKL() snake_case_ = DDIMScheduler() snake_case_ = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> str: if str(lowerCAmelCase__).startswith('mps'): snake_case_ = torch.manual_seed(lowerCAmelCase__) else: snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) snake_case_ = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def a_ ( self) -> Optional[Any]: snake_case_ = 'cpu' snake_case_ = self.get_dummy_components() snake_case_ = self.pipeline_class(**lowerCAmelCase__) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs(lowerCAmelCase__) snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] self.assertEqual(image.shape, (1, 16, 16, 3)) snake_case_ = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457]) snake_case_ = np.abs(image_slice.flatten() - expected_slice).max() self.assertLessEqual(lowerCAmelCase__, 1e-3) def a_ ( self) -> str: self._test_inference_batch_single_identical(relax_max_difference=lowerCAmelCase__, expected_max_diff=1e-3) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', ) def a_ ( self) -> List[str]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3) @require_torch_gpu @slow class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Tuple: super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self) -> int: snake_case_ = torch.manual_seed(0) snake_case_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256') pipe.to('cuda') snake_case_ = ['vase', 'umbrella', 'white shark', 'white wolf'] snake_case_ = pipe.get_label_ids(lowerCAmelCase__) snake_case_ = pipe(lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=40, output_type='np').images for word, image in zip(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = load_numpy( 
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy') assert np.abs((expected_image - image).max()) < 1e-2 def a_ ( self) -> int: snake_case_ = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512') snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.to('cuda') snake_case_ = ['vase', 'umbrella'] snake_case_ = pipe.get_label_ids(lowerCAmelCase__) snake_case_ = torch.manual_seed(0) snake_case_ = pipe(lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=25, output_type='np').images for word, image in zip(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f'/dit/{word}_512.npy') assert np.abs((expected_image - image).max()) < 1e-1
312
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "trajectory_transformer" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]: snake_case_ = vocab_size snake_case_ = action_weight snake_case_ = reward_weight snake_case_ = value_weight snake_case_ = max_position_embeddings snake_case_ = block_size snake_case_ = action_dim snake_case_ = observation_dim snake_case_ = transition_dim snake_case_ = learning_rate snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_embd snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = resid_pdrop snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = kaiming_initializer_range snake_case_ = use_cache super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
312
1
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase = logging.get_logger() @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = field(default_factory=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ = field(default_factory=lowerCAmelCase__ ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> str: snake_case_ = len(list(m.modules())) == 1 or isinstance(lowerCAmelCase__, nn.Convad) or isinstance(lowerCAmelCase__, nn.BatchNormad) if has_not_submodules: self.traced.append(lowerCAmelCase__) def __call__( self, lowerCAmelCase__) -> Union[str, Any]: for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook)) self.module(lowerCAmelCase__) [x.remove() for x in self.handles] return self @property def a_ ( self) -> Any: # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda lowerCAmelCase__: len(list(x.state_dict().keys())) > 0, self.traced)) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 0 SCREAMING_SNAKE_CASE_ = field(default_factory=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ = field(default_factory=lowerCAmelCase__ ) def __call__( self, lowerCAmelCase__) -> Optional[int]: snake_case_ = Tracker(self.dest)(lowerCAmelCase__).parametrized snake_case_ = Tracker(self.src)(lowerCAmelCase__).parametrized snake_case_ = list(filter(lambda lowerCAmelCase__: type(lowerCAmelCase__) not in self.src_skip, lowerCAmelCase__)) snake_case_ = list(filter(lambda lowerCAmelCase__: type(lowerCAmelCase__) not in self.dest_skip, lowerCAmelCase__)) if len(lowerCAmelCase__) != len(lowerCAmelCase__): raise Exception( f'Numbers of operations are different. Source module has {len(lowerCAmelCase__)} operations while' f' destination module has {len(lowerCAmelCase__)}.') for dest_m, src_m in zip(lowerCAmelCase__, lowerCAmelCase__): dest_m.load_state_dict(src_m.state_dict()) if self.verbose == 1: print(f'Transfered from={src_m} to={dest_m}') def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = True ) -> List[str]: print(f'Converting {name}...' ) with torch.no_grad(): snake_case_ = timm.create_model(UpperCAmelCase , pretrained=UpperCAmelCase ).eval() snake_case_ = ResNetForImageClassification(UpperCAmelCase ).eval() snake_case_ = ModuleTransfer(src=UpperCAmelCase , dest=UpperCAmelCase ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(UpperCAmelCase ) assert torch.allclose(from_model(UpperCAmelCase ) , our_model(UpperCAmelCase ).logits ), "The model logits don't match the original one." 
snake_case_ = f'resnet{"-".join(name.split("resnet" ) )}' print(UpperCAmelCase ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCAmelCase , ) # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase , ) print(f'Pushed {checkpoint_name}' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = None , UpperCAmelCase = True ) -> Tuple: snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = (1, num_labels) snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(UpperCAmelCase , num_labels=UpperCAmelCase , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase ) snake_case_ = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(UpperCAmelCase , names_to_config[model_name] , UpperCAmelCase , UpperCAmelCase ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) return config, expected_shape if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default=None, type=str, help=( '''The name of the model you wish to convert, it must be one of the supported resnet* architecture,''' ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.''' ), ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=Path, required=True, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=True, type=bool, required=False, help='''If True, push model and image processor to the hub.''', ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
312
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["keras_nlp"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(self, ['keras_nlp'])
312
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''facebook/convnextv2-tiny-1k-224''': '''https://huggingface.co/facebook/convnextv2-tiny-1k-224/resolve/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "convnextv2" def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="gelu", lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=0.0, lowerCAmelCase__=224, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Any: super().__init__(**lowerCAmelCase__) snake_case_ = num_channels snake_case_ = patch_size snake_case_ = num_stages snake_case_ = [96, 192, 384, 768] if hidden_sizes is None else hidden_sizes snake_case_ = [3, 3, 9, 3] if depths is None else depths snake_case_ = hidden_act snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = drop_path_rate snake_case_ = image_size snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(self.depths) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
312
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = os.path.dirname(UpperCAmelCase ) snake_case_ = os.path.basename(UpperCAmelCase ) snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCAmelCase ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
312
1
"""simple docstring""" from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = DistilBertTokenizer SCREAMING_SNAKE_CASE_ = DistilBertTokenizerFast SCREAMING_SNAKE_CASE_ = True @slow def a_ ( self) -> str: snake_case_ = DistilBertTokenizer.from_pretrained('distilbert-base-uncased') snake_case_ = tokenizer.encode('sequence builders', add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.encode('multi-sequence build', add_special_tokens=lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
312
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __UpperCamelCase = '''▁''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = BarthezTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
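# Usage sketch (illustrative; fetching the moussaKam/barthez checkpoint):
#
#     from transformers import BarthezTokenizerFast
#     tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#     tok.build_inputs_with_special_tokens([100, 101])
#     # -> [tok.cls_token_id, 100, 101, tok.sep_token_id]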
312
1
"""simple docstring""" from . import __version__ # Backward compatibility imports, to make sure all those objects can be found in file_utils from .utils import ( CLOUDFRONT_DISTRIB_PREFIX, CONFIG_NAME, DISABLE_TELEMETRY, DUMMY_INPUTS, DUMMY_MASK, ENV_VARS_TRUE_AND_AUTO_VALUES, ENV_VARS_TRUE_VALUES, FEATURE_EXTRACTOR_NAME, FLAX_WEIGHTS_NAME, HF_MODULES_CACHE, HUGGINGFACE_CO_PREFIX, HUGGINGFACE_CO_RESOLVE_ENDPOINT, MODEL_CARD_NAME, MULTIPLE_CHOICE_DUMMY_INPUTS, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, S3_BUCKET_PREFIX, SENTENCEPIECE_UNDERLINE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TORCH_FX_REQUIRED_VERSION, TRANSFORMERS_CACHE, TRANSFORMERS_DYNAMIC_MODULE_NAME, USE_JAX, USE_TF, USE_TORCH, WEIGHTS_INDEX_NAME, WEIGHTS_NAME, ContextManagers, DummyObject, EntryNotFoundError, ExplicitEnum, ModelOutput, PaddingStrategy, PushToHubMixin, RepositoryNotFoundError, RevisionNotFoundError, TensorType, _LazyModule, add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, cached_property, copy_func, default_cache_path, define_sagemaker_information, get_cached_models, get_file_from_repo, get_full_repo_name, get_torch_version, has_file, http_user_agent, is_apex_available, is_bsa_available, is_coloredlogs_available, is_datasets_available, is_detectrona_available, is_faiss_available, is_flax_available, is_ftfy_available, is_in_notebook, is_ipex_available, is_librosa_available, is_offline_mode, is_onnx_available, is_pandas_available, is_phonemizer_available, is_protobuf_available, is_psutil_available, is_pyanvml_available, is_pyctcdecode_available, is_pytesseract_available, is_pytorch_quantization_available, is_rjieba_available, is_sagemaker_dp_enabled, is_sagemaker_mp_enabled, is_scipy_available, is_sentencepiece_available, is_seqio_available, is_sklearn_available, is_soundfile_availble, is_spacy_available, is_speech_available, is_tensor, is_tensorflow_probability_available, is_tfaonnx_available, is_tf_available, is_timm_available, is_tokenizers_available, is_torch_available, is_torch_bfaa_available, is_torch_cuda_available, is_torch_fx_available, is_torch_fx_proxy, is_torch_mps_available, is_torch_tfaa_available, is_torch_tpu_available, is_torchaudio_available, is_training_run_on_sagemaker, is_vision_available, replace_return_docstrings, requires_backends, to_numpy, to_py_obj, torch_only_method, )
312
"""simple docstring""" import functools def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: # Validation if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(UpperCAmelCase ) == 0: return 0 if min(UpperCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(UpperCAmelCase ) >= 366: raise ValueError('All days elements should be less than 366' ) snake_case_ = set(UpperCAmelCase ) @functools.cache def dynamic_programming(UpperCAmelCase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
312
1
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __UpperCamelCase = '''▁''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = BarthezTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def a_ ( self, token_ids_a, token_ids_b = None) -> List[int]: if token_ids_b is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_b + sep def a_ ( self, token_ids_a, token_ids_b = None) -> List[int]: sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_b is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_b + sep) * [0] def a_ ( self, save_directory, filename_prefix = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(save_directory): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return out_vocab_file = os.path.join( save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
312
"""simple docstring""" import copy import re class UpperCamelCase : SCREAMING_SNAKE_CASE_ = "hp" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = None @classmethod def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = prefix snake_case_ = defaults cls.build_naming_info() @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: if len(lowerCAmelCase__) == 0: return "" snake_case_ = None if any(char.isdigit() for char in word): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1, len(lowerCAmelCase__) + 1): snake_case_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: snake_case_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCAmelCase__): snake_case_ = '' while integer != 0: snake_case_ = chr(ord('A') + integer % 10) + s integer //= 10 return s snake_case_ = 0 while True: snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__) if sword in info["reverse_short_word"]: continue else: snake_case_ = sword break snake_case_ = short_word snake_case_ = word return short_word @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = param_name.split('_') snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name snake_case_ = ['', '_'] for separator in separators: snake_case_ = separator.join(lowerCAmelCase__) if shortname not in info["reverse_short_param"]: snake_case_ = shortname snake_case_ = param_name return shortname return param_name @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = short_name snake_case_ = param_name @classmethod def a_ ( cls) -> List[str]: if cls.NAMING_INFO is not None: return snake_case_ = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } snake_case_ = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = info @classmethod def a_ ( cls, lowerCAmelCase__) -> List[Any]: cls.build_naming_info() assert cls.PREFIX is not None snake_case_ = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue snake_case_ = cls.NAMING_INFO['short_param'][k] if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = 1 if v else 0 snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-' snake_case_ = f'{key}{sep}{v}' name.append(lowerCAmelCase__) return "_".join(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__) -> Optional[Any]: snake_case_ = repr[len(cls.PREFIX) + 1 :] if repr == "": snake_case_ = [] else: snake_case_ = repr.split('_') snake_case_ = {} for value in values: if "-" in value: snake_case_ , snake_case_ = value.split('-') else: snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__) snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__)) snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k] snake_case_ = p_v for k in cls.DEFAULTS: if k not in parameters: snake_case_ = cls.DEFAULTS[k] return parameters
312
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''facebook/deit-base-distilled-patch16-224''': ( '''https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json''' ), # See all DeiT models at https://huggingface.co/models?filter=deit } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "deit" def __init__( self, lowerCAmelCase__=768, lowerCAmelCase__=12, lowerCAmelCase__=12, lowerCAmelCase__=3072, lowerCAmelCase__="gelu", lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=224, lowerCAmelCase__=16, lowerCAmelCase__=3, lowerCAmelCase__=True, lowerCAmelCase__=16, **lowerCAmelCase__, ) -> Dict: super().__init__(**lowerCAmelCase__) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = image_size snake_case_ = patch_size snake_case_ = num_channels snake_case_ = qkv_bias snake_case_ = encoder_stride class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-4
312
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = str(UpperCAmelCase ) dataset_info.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfo.from_directory(UpperCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) ) def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCAmelCase ) snake_case_ = yaml.safe_load(UpperCAmelCase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase ( ) -> Optional[Any]: snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = str(UpperCAmelCase ) dataset_infos_dict.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in 
dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
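The tests above exercise the round trip that datasets uses to persist metadata (dataset_info.json, plus the YAML header of README.md). The same round trip outside pytest, using only the APIs already called in the tests:

import tempfile

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo

info = DatasetInfo(description='foo', features=Features({'a': Value('int32')}), dataset_size=42)
with tempfile.TemporaryDirectory() as tmp_dir:
    info.write_to_directory(tmp_dir)              # writes dataset_info.json
    reloaded = DatasetInfo.from_directory(tmp_dir)
assert reloaded.dataset_size == 42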
312
1
"""simple docstring""" import math from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import SchedulerMixin, SchedulerOutput class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = 1 @register_to_config def __init__( self, lowerCAmelCase__ = 1000, lowerCAmelCase__ = None) -> Dict: # set `betas`, `alphas`, `timesteps` self.set_timesteps(lowerCAmelCase__) # standard deviation of the initial noise distribution snake_case_ = 1.0 # For now we only support F-PNDM, i.e. the runge-kutta method # For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf # mainly at formula (9), (12), (13) and the Algorithm 2. snake_case_ = 4 # running values snake_case_ = [] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> int: snake_case_ = num_inference_steps snake_case_ = torch.linspace(1, 0, num_inference_steps + 1)[:-1] snake_case_ = torch.cat([steps, torch.tensor([0.0])]) if self.config.trained_betas is not None: snake_case_ = torch.tensor(self.config.trained_betas, dtype=torch.floataa) else: snake_case_ = torch.sin(steps * math.pi / 2) ** 2 snake_case_ = (1.0 - self.betas**2) ** 0.5 snake_case_ = (torch.atana(self.betas, self.alphas) / math.pi * 2)[:-1] snake_case_ = timesteps.to(lowerCAmelCase__) snake_case_ = [] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = True, ) -> Union[SchedulerOutput, Tuple]: if self.num_inference_steps is None: raise ValueError( 'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler') snake_case_ = (self.timesteps == timestep).nonzero().item() snake_case_ = timestep_index + 1 snake_case_ = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index] self.ets.append(lowerCAmelCase__) if len(self.ets) == 1: snake_case_ = self.ets[-1] elif len(self.ets) == 2: snake_case_ = (3 * self.ets[-1] - self.ets[-2]) / 2 elif len(self.ets) == 3: snake_case_ = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12 else: snake_case_ = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4]) snake_case_ = self._get_prev_sample(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__) -> torch.FloatTensor: return sample def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Any: snake_case_ = self.alphas[timestep_index] snake_case_ = self.betas[timestep_index] snake_case_ = self.alphas[prev_timestep_index] snake_case_ = self.betas[prev_timestep_index] snake_case_ = (sample - sigma * ets) / max(lowerCAmelCase__, 1e-8) snake_case_ = next_alpha * pred + ets * next_sigma return prev_sample def __len__( self) -> Tuple: return self.config.num_train_timesteps
312
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE_ = frozenset([] ) def a_ ( self) -> Any: torch.manual_seed(0) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, ) snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__) torch.manual_seed(0) snake_case_ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0) snake_case_ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, ) snake_case_ = CLIPTextModel(lowerCAmelCase__) snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') snake_case_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) snake_case_ = image.cpu().permute(0, 2, 3, 1)[0] snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64)) snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64)) if str(lowerCAmelCase__).startswith('mps'): snake_case_ = torch.manual_seed(lowerCAmelCase__) else: snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a_ ( self) -> Dict: snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = 
StableDiffusionInpaintPipeline(**lowerCAmelCase__) snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs(lowerCAmelCase__) snake_case_ = sd_pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self) -> Union[str, Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def a_ ( self) -> Optional[int]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def a_ ( self) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = 
'stabilityai/stable-diffusion-2-inpainting' snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler') snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', ) snake_case_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
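Outside the test harness, the slow tests above reduce to the standard diffusers inpainting flow. A hedged sketch (needs a CUDA GPU and Hub access to the stabilityai/stable-diffusion-2-inpainting weights referenced above; the image paths are placeholders):

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    'stabilityai/stable-diffusion-2-inpainting', torch_dtype=torch.float16
)
pipe.to('cuda')

init_image = load_image('init_image.png')  # placeholder path
mask_image = load_image('mask.png')        # placeholder path: white regions get repainted
image = pipe(
    prompt='Face of a yellow cat, high resolution, sitting on a park bench',
    image=init_image,
    mask_image=mask_image,
).images[0]
image.save('inpainted.png')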
312
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "resnet" SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"] def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict: super().__init__(**lowerCAmelCase__) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}') snake_case_ = num_channels snake_case_ = embedding_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = layer_type snake_case_ = hidden_act snake_case_ = downsample_in_first_stage snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-3
312
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: return (preds == labels).mean() @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} ) SCREAMING_SNAKE_CASE_ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Set seed set_seed(training_args.seed ) try: snake_case_ = processors[data_args.task_name]() snake_case_ = processor.get_labels() snake_case_ = len(UpperCAmelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase ) -> Dict: snake_case_ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )} # Data collator snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case_ = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ = trainer.evaluate() snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase , 'w' ) as 
writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(UpperCAmelCase ) return results def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
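The script above is driven entirely by HfArgumentParser, so every dataclass field doubles as a command-line flag. A minimal sketch of that mechanism with the stock TrainingArguments (the flag values are placeholders):

from transformers import HfArgumentParser, TrainingArguments

parser = HfArgumentParser(TrainingArguments)
(training_args,) = parser.parse_args_into_dataclasses(
    args=['--output_dir', './output_swag', '--do_train', '--per_device_train_batch_size', '8']
)
print(training_args.output_dir, training_args.do_train, training_args.per_device_train_batch_size)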
312
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class UpperCamelCase ( unittest.TestCase ): def __init__( self, lowerCAmelCase__, lowerCAmelCase__=7, lowerCAmelCase__=3, lowerCAmelCase__=30, lowerCAmelCase__=400, lowerCAmelCase__=True, lowerCAmelCase__=None, lowerCAmelCase__=0.9, lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__=[0.5, 0.5, 0.5], lowerCAmelCase__=[0.5, 0.5, 0.5], ) -> Union[str, Any]: snake_case_ = size if size is not None else {'shortest_edge': 30} snake_case_ = crop_size if crop_size is not None else {'height': 30, 'width': 30} snake_case_ = parent snake_case_ = batch_size snake_case_ = num_channels snake_case_ = min_resolution snake_case_ = max_resolution snake_case_ = do_resize_and_center_crop snake_case_ = size snake_case_ = crop_pct snake_case_ = crop_size snake_case_ = do_normalize snake_case_ = image_mean snake_case_ = image_std def a_ ( self) -> Union[str, Any]: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = PoolFormerImageProcessor if is_vision_available() else None def a_ ( self) -> List[str]: snake_case_ = PoolFormerImageProcessingTester(self) @property def a_ ( self) -> Optional[Any]: return self.image_processor_tester.prepare_image_processor_dict() def a_ ( self) -> Optional[Any]: snake_case_ = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(lowerCAmelCase__, 'do_resize_and_center_crop')) self.assertTrue(hasattr(lowerCAmelCase__, 'size')) self.assertTrue(hasattr(lowerCAmelCase__, 'crop_pct')) self.assertTrue(hasattr(lowerCAmelCase__, 'do_normalize')) self.assertTrue(hasattr(lowerCAmelCase__, 'image_mean')) self.assertTrue(hasattr(lowerCAmelCase__, 'image_std')) def a_ ( self) -> Union[str, Any]: snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {'shortest_edge': 30}) self.assertEqual(image_processor.crop_size, {'height': 30, 'width': 30}) snake_case_ = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {'shortest_edge': 42}) self.assertEqual(image_processor.crop_size, {'height': 84, 'width': 84}) def a_ ( self) -> int: pass def a_ ( self) -> Optional[Any]: # Initialize image_processing snake_case_ = self.image_processing_class(**self.image_processor_dict) # create random PIL images snake_case_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__, Image.Image) # Test not batched input snake_case_ = image_processing(image_inputs[0], return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case_ = 
image_processing(lowerCAmelCase__, return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def a_ ( self) -> Tuple: # Initialize image_processing snake_case_ = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors snake_case_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, numpify=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__, np.ndarray) # Test not batched input snake_case_ = image_processing(image_inputs[0], return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case_ = image_processing(lowerCAmelCase__, return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) def a_ ( self) -> str: # Initialize image_processing snake_case_ = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors snake_case_ = prepare_image_inputs(self.image_processor_tester, equal_resolution=lowerCAmelCase__, torchify=lowerCAmelCase__) for image in image_inputs: self.assertIsInstance(lowerCAmelCase__, torch.Tensor) # Test not batched input snake_case_ = image_processing(image_inputs[0], return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), ) # Test batched snake_case_ = image_processing(lowerCAmelCase__, return_tensors='pt').pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ), )
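A small sketch of the processor under test, using only constructor keys that appear in the tester dict above (the output shape is the configured crop size; exact kwargs may differ across transformers versions):

import numpy as np
from transformers import PoolFormerImageProcessor

processor = PoolFormerImageProcessor(
    size={'shortest_edge': 30}, crop_size={'height': 30, 'width': 30}, crop_pct=0.9
)
image = (np.random.rand(400, 400, 3) * 255).astype(np.uint8)  # random stand-in image
pixel_values = processor(image, return_tensors='pt').pixel_values
print(pixel_values.shape)  # expected: torch.Size([1, 3, 30, 30])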
312
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
312
1
"""simple docstring""" import unittest import torch from diffusers import VQModel from diffusers.utils import floats_tensor, torch_device from diffusers.utils.testing_utils import enable_full_determinism from .test_modeling_common import ModelTesterMixin, UNetTesterMixin enable_full_determinism() class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = VQModel SCREAMING_SNAKE_CASE_ = "sample" @property def a_ ( self, lowerCAmelCase__=(32, 32)) -> Dict: snake_case_ = 4 snake_case_ = 3 snake_case_ = floats_tensor((batch_size, num_channels) + sizes).to(lowerCAmelCase__) return {"sample": image} @property def a_ ( self) -> int: return (3, 32, 32) @property def a_ ( self) -> Dict: return (3, 32, 32) def a_ ( self) -> Dict: snake_case_ = { 'block_out_channels': [32, 64], 'in_channels': 3, 'out_channels': 3, 'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'], 'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'], 'latent_channels': 3, } snake_case_ = self.dummy_input return init_dict, inputs_dict def a_ ( self) -> Dict: pass def a_ ( self) -> Any: pass def a_ ( self) -> List[Any]: snake_case_ , snake_case_ = VQModel.from_pretrained('fusing/vqgan-dummy', output_loading_info=lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) self.assertEqual(len(loading_info['missing_keys']), 0) model.to(lowerCAmelCase__) snake_case_ = model(**self.dummy_input) assert image is not None, "Make sure output is not None" def a_ ( self) -> Tuple: snake_case_ = VQModel.from_pretrained('fusing/vqgan-dummy') model.to(lowerCAmelCase__).eval() torch.manual_seed(0) if torch.cuda.is_available(): torch.cuda.manual_seed_all(0) snake_case_ = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size) snake_case_ = image.to(lowerCAmelCase__) with torch.no_grad(): snake_case_ = model(lowerCAmelCase__).sample snake_case_ = output[0, -1, -3:, -3:].flatten().cpu() # fmt: off snake_case_ = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143]) # fmt: on self.assertTrue(torch.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1e-3))
312
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict: snake_case_ = [] for old_item in old_list: snake_case_ = old_item.replace('in_layers.0' , 'norm1' ) snake_case_ = new_item.replace('in_layers.2' , 'conv1' ) snake_case_ = new_item.replace('out_layers.0' , 'norm2' ) snake_case_ = new_item.replace('out_layers.3' , 'conv2' ) snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' ) snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]: snake_case_ = [] for old_item in old_list: snake_case_ = old_item snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' ) snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' ) snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]: assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): snake_case_ = old_checkpoint[path] snake_case_ = old_tensor.shape[0] // 3 snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3 snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 ) snake_case_ = query.reshape(UpperCAmelCase ) snake_case_ = key.reshape(UpperCAmelCase ) snake_case_ = value.reshape(UpperCAmelCase ) for path in paths: snake_case_ = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: snake_case_ = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: snake_case_ = old_checkpoint[path['old']][:, :, 0] else: snake_case_ = old_checkpoint[path['old']] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = {} snake_case_ = checkpoint['time_embed.0.weight'] snake_case_ = checkpoint['time_embed.0.bias'] snake_case_ = checkpoint['time_embed.2.weight'] snake_case_ = checkpoint['time_embed.2.bias'] snake_case_ = checkpoint['input_blocks.0.0.weight'] snake_case_ = checkpoint['input_blocks.0.0.bias'] snake_case_ = checkpoint['out.0.weight'] snake_case_ = checkpoint['out.0.bias'] snake_case_ = checkpoint['out.2.weight'] snake_case_ = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the middle blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the output blocks only snake_case_ = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } for i in range(1 , UpperCAmelCase ): snake_case_ = (i - 1) // (config['num_res_blocks'] + 1) snake_case_ = (i - 1) % (config['num_res_blocks'] + 1) snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.weight' ] snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase ) if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , ) snake_case_ = middle_blocks[0] snake_case_ = middle_blocks[1] snake_case_ = middle_blocks[2] snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase ) for i in range(UpperCAmelCase ): snake_case_ = i // (config['num_res_blocks'] + 1) snake_case_ = i % (config['num_res_blocks'] + 1) snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]] snake_case_ = {} for layer in output_block_layers: snake_case_ , snake_case_ = layer.split('.' 
)[0], shave_segments(UpperCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCAmelCase ) else: snake_case_ = [layer_name] if len(UpperCAmelCase ) > 1: snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(UpperCAmelCase ) == 2: snake_case_ = [] if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , ) else: snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] ) snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] ) snake_case_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: __UpperCamelCase = json.loads(f.read()) __UpperCamelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __UpperCamelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: 
E722 model.save_pretrained(args.dump_path)
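Driving the converter programmatically mirrors the __main__ block above; the function name convert_ldm_checkpoint is taken from that block, the diffusers class name is the upstream spelling of the obfuscated import, and all paths are placeholders:

import json

import torch
from diffusers import UNet2DModel

checkpoint = torch.load('model.ckpt', map_location='cpu')  # placeholder path
with open('config.json') as f:                             # placeholder path
    config = json.loads(f.read())

new_state_dict = convert_ldm_checkpoint(checkpoint, config)  # defined above
if 'ldm' in config:
    del config['ldm']

model = UNet2DModel(**config)
model.load_state_dict(new_state_dict)
model.save_pretrained('./converted_unet')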
312
1
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
312
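A worked evaluation of the game tree above, assuming the root maximizes (the original call was presumably minimax(0, 0, True, scores, height); the dump collapses the boolean arguments):

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
d2 = [max(scores[i], scores[i + 1]) for i in range(0, 8, 2)]  # depth 2: [90, 33, 65, 34423]
d1 = [min(d2[0], d2[1]), min(d2[2], d2[3])]                   # depth 1: [33, 65]
print(max(d1))                                                # Optimal value : 65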
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def UpperCAmelCase ( UpperCAmelCase ) -> Dict: # vision encoder if "img_encoder.pos_embed" in name: snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: snake_case_ = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: snake_case_ = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: snake_case_ = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: snake_case_ = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: snake_case_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: snake_case_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: snake_case_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: snake_case_ = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: snake_case_ = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: snake_case_ = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(UpperCAmelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' 
) snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] ) snake_case_ = config.vision_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ = int(key_split[3] ) snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = rename_key(UpperCAmelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ = val.squeeze_() else: snake_case_ = val return orig_state_dict def UpperCAmelCase ( ) -> Any: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int: snake_case_ = GroupViTConfig() snake_case_ = GroupViTModel(UpperCAmelCase ).eval() snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model'] snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0) # verify result snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) snake_case_ = prepare_img() snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase ) if model_name == "groupvit-gcc-yfcc": snake_case_ = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(f'Model name {model_name} not supported.' ) assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 ) processor.save_pretrained(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) print('Successfully saved processor and model to' , UpperCAmelCase ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(UpperCAmelCase , organization='nielsr' ) model.push_to_hub(UpperCAmelCase , organization='nielsr' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gccy-fcc''', type=str, help='''Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) __UpperCamelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
312
1
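The fused-QKV split performed in the state-dict conversion above, isolated on toy tensors. This is a sketch assuming the usual query/key/value stacking order, which the collapsed assignment targets no longer show:

import torch

dim = 4                            # per-projection hidden size
fused = torch.randn(3 * dim, dim)  # stand-in for a '...qkv.weight' entry
q = fused[:dim, :]
k = fused[dim : dim * 2, :]
v = fused[-dim:, :]
assert torch.equal(torch.cat([q, k, v]), fused)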
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from timm import create_model from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import BitConfig, BitForImageClassification, BitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) def UpperCAmelCase ( UpperCAmelCase ) -> Dict: snake_case_ = 'huggingface/label-files' snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = json.load(open(hf_hub_download(UpperCAmelCase , UpperCAmelCase , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCAmelCase ): v for k, v in idalabel.items()} snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = 'std_conv' if 'bit' in model_name else False # note that when using BiT as backbone for ViT-hybrid checkpoints, # one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same", # config.conv_layer = "std_conv_same" snake_case_ = BitConfig( conv_layer=UpperCAmelCase , num_labels=1000 , idalabel=UpperCAmelCase , labelaid=UpperCAmelCase , ) return config def UpperCAmelCase ( UpperCAmelCase ) -> List[str]: if "stem.conv" in name: snake_case_ = name.replace('stem.conv' , 'bit.embedder.convolution' ) if "blocks" in name: snake_case_ = name.replace('blocks' , 'layers' ) if "head.fc" in name: snake_case_ = name.replace('head.fc' , 'classifier.1' ) if name.startswith('norm' ): snake_case_ = 'bit.' + name if "bit" not in name and "classifier" not in name: snake_case_ = 'bit.encoder.' + name return name def UpperCAmelCase ( ) -> List[str]: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False ) -> Any: snake_case_ = get_config(UpperCAmelCase ) # load original model from timm snake_case_ = create_model(UpperCAmelCase , pretrained=UpperCAmelCase ) timm_model.eval() # load state_dict of original model snake_case_ = timm_model.state_dict() for key in state_dict.copy().keys(): snake_case_ = state_dict.pop(UpperCAmelCase ) snake_case_ = val.squeeze() if 'head' in key else val # load HuggingFace model snake_case_ = BitForImageClassification(UpperCAmelCase ) model.eval() model.load_state_dict(UpperCAmelCase ) # create image processor snake_case_ = create_transform(**resolve_data_config({} , model=UpperCAmelCase ) ) snake_case_ = transform.transforms snake_case_ = { 'bilinear': PILImageResampling.BILINEAR, 'bicubic': PILImageResampling.BICUBIC, 'nearest': PILImageResampling.NEAREST, } snake_case_ = BitImageProcessor( do_resize=UpperCAmelCase , size={'shortest_edge': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=UpperCAmelCase , crop_size={'height': timm_transforms[1].size[0], 'width': timm_transforms[1].size[1]} , do_normalize=UpperCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , ) snake_case_ = prepare_img() snake_case_ = transform(UpperCAmelCase ).unsqueeze(0 ) snake_case_ = processor(UpperCAmelCase , return_tensors='pt' ).pixel_values # verify pixel values assert torch.allclose(UpperCAmelCase , UpperCAmelCase ) # verify logits with torch.no_grad(): 
snake_case_ = model(UpperCAmelCase ) snake_case_ = outputs.logits print('Logits:' , logits[0, :3] ) print('Predicted class:' , model.config.idalabel[logits.argmax(-1 ).item()] ) snake_case_ = timm_model(UpperCAmelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(UpperCAmelCase , outputs.logits , atol=1e-3 ) print('Looks ok!' ) if pytorch_dump_folder_path is not None: Path(UpperCAmelCase ).mkdir(exist_ok=UpperCAmelCase ) print(f'Saving model {model_name} and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCAmelCase ) processor.save_pretrained(UpperCAmelCase ) if push_to_hub: print(f'Pushing model {model_name} and processor to the hub' ) model.push_to_hub(f'ybelkada/{model_name}' ) processor.push_to_hub(f'ybelkada/{model_name}' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''resnetv2_50x1_bitm''', type=str, help='''Name of the BiT timm model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether to push the model to the hub.''', ) __UpperCamelCase = parser.parse_args() convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
312
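A few sanity checks that follow directly from the string replacements in the renaming helper above (referred to here by its conventional name rename_key, since the dump collapses function identifiers):

assert rename_key('stem.conv.weight') == 'bit.embedder.convolution.weight'
assert rename_key('head.fc.bias') == 'classifier.1.bias'
assert rename_key('stages.0.blocks.0.conv1.weight') == 'bit.encoder.stages.0.layers.0.conv1.weight'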
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
1
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available from transformers.models.bert.tokenization_bert import BertTokenizer from transformers.testing_utils import require_tensorflow_text, require_tf, slow if is_tf_available(): import tensorflow as tf if is_tensorflow_text_available(): from transformers.models.bert import TFBertTokenizer __UpperCamelCase = ['''bert-base-uncased''', '''bert-base-cased'''] __UpperCamelCase = '''hf-internal-testing/tiny-bert-tf-only''' if is_tf_available(): class UpperCamelCase ( tf.keras.Model ): def __init__( self, lowerCAmelCase__) -> Dict: super().__init__() snake_case_ = tokenizer snake_case_ = AutoConfig.from_pretrained(lowerCAmelCase__) snake_case_ = TFAutoModel.from_config(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> List[str]: snake_case_ = self.tokenizer(lowerCAmelCase__) snake_case_ = self.bert(**lowerCAmelCase__) return out["pooler_output"] @require_tf @require_tensorflow_text class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: super().setUp() snake_case_ = [ BertTokenizer.from_pretrained(lowerCAmelCase__) for checkpoint in (TOKENIZER_CHECKPOINTS * 2) ] # repeat for when fast_bert_tokenizer=false snake_case_ = [TFBertTokenizer.from_pretrained(lowerCAmelCase__) for checkpoint in TOKENIZER_CHECKPOINTS] + [ TFBertTokenizer.from_pretrained(lowerCAmelCase__, use_fast_bert_tokenizer=lowerCAmelCase__) for checkpoint in TOKENIZER_CHECKPOINTS ] assert len(self.tokenizers) == len(self.tf_tokenizers) snake_case_ = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] snake_case_ = list(zip(self.test_sentences, self.test_sentences[::-1])) def a_ ( self) -> List[str]: for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers): for test_inputs in (self.test_sentences, self.paired_sentences): snake_case_ = tokenizer(lowerCAmelCase__, return_tensors='tf', padding='longest') snake_case_ = tf_tokenizer(lowerCAmelCase__) for key in python_outputs.keys(): self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape)) self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.intaa) == tf_outputs[key])) @slow def a_ ( self) -> int: for tf_tokenizer in self.tf_tokenizers: snake_case_ = tf_tokenizer(self.paired_sentences) snake_case_ = tf_tokenizer( text=[sentence[0] for sentence in self.paired_sentences], text_pair=[sentence[1] for sentence in self.paired_sentences], ) for key in merged_outputs.keys(): self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.intaa) == separated_outputs[key])) @slow def a_ ( self) -> Optional[int]: for tf_tokenizer in self.tf_tokenizers: snake_case_ = tf.function(lowerCAmelCase__) for test_inputs in (self.test_sentences, self.paired_sentences): snake_case_ = tf.constant(lowerCAmelCase__) snake_case_ = compiled_tokenizer(lowerCAmelCase__) snake_case_ = tf_tokenizer(lowerCAmelCase__) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key])) @slow def a_ ( self) -> int: for tf_tokenizer in self.tf_tokenizers: snake_case_ = ModelToSave(tokenizer=lowerCAmelCase__) 
snake_case_ = tf.convert_to_tensor(self.test_sentences) snake_case_ = model(lowerCAmelCase__) # Build model with some sample inputs with TemporaryDirectory() as tempdir: snake_case_ = Path(lowerCAmelCase__) / 'saved.model' model.save(lowerCAmelCase__) snake_case_ = tf.keras.models.load_model(lowerCAmelCase__) snake_case_ = loaded_model(lowerCAmelCase__) # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
312
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]: if partitions <= 0: raise ValueError('partitions must be a positive number!' ) if partitions > number_of_bytes: raise ValueError('partitions can not > number_of_bytes!' ) snake_case_ = number_of_bytes // partitions snake_case_ = [] for i in range(UpperCAmelCase ): snake_case_ = i * bytes_per_partition + 1 snake_case_ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
312
1
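A worked example for the byte-allocation function above: 100 bytes over 4 partitions gives 25 bytes per partition, with 1-indexed inclusive ranges and the last partition absorbing any remainder (10 bytes over 3 partitions would give ['1-3', '4-6', '7-10']):

bpp = 100 // 4  # bytes per partition
expected = [f'{i * bpp + 1}-{100 if i == 3 else (i + 1) * bpp}' for i in range(4)]
assert expected == ['1-25', '26-50', '51-75', '76-100']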
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , ) -> tuple[str, float]: if (stress, tangential_force, area).count(0 ) != 1: raise ValueError('You cannot supply more or less than 2 values' ) elif stress < 0: raise ValueError('Stress cannot be negative' ) elif tangential_force < 0: raise ValueError('Tangential Force cannot be negative' ) elif area < 0: raise ValueError('Area cannot be negative' ) elif stress == 0: return ( "stress", tangential_force / area, ) elif tangential_force == 0: return ( "tangential_force", stress * area, ) else: return ( "area", tangential_force / stress, ) if __name__ == "__main__": import doctest doctest.testmod()
312
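Worked branches for the shear-stress solver above: exactly one of the three quantities must be zero, and that one is solved from stress = tangential_force / area:

assert 25 / 5 == 5.0    # ('stress', ...) branch: tangential_force / area
assert 100 / 25 == 4.0  # ('area', ...) branch: tangential_force / stress
assert 5 * 4 == 20      # ('tangential_force', ...) branch: stress * area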
"""simple docstring""" __UpperCamelCase = 256 # Modulus to hash a string __UpperCamelCase = 100_0003 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool: snake_case_ = len(UpperCAmelCase ) snake_case_ = len(UpperCAmelCase ) if p_len > t_len: return False snake_case_ = 0 snake_case_ = 0 snake_case_ = 1 # Calculating the hash of pattern and substring of text for i in range(UpperCAmelCase ): snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue snake_case_ = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash snake_case_ = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: snake_case_ = 'abc1abc12' snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc' snake_case_ = 'alskfjaldsk23adsfabcabc' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 2) snake_case_ = 'ABABX' snake_case_ = 'ABABZABABYABABX' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 3) snake_case_ = 'AAAB' snake_case_ = 'ABAAAAAB' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 4) snake_case_ = 'abcdabcy' snake_case_ = 'abcxabcdabxabcdabcdabcy' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 5) snake_case_ = 'Lü' snake_case_ = 'Lüsai' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'Lue' assert not rabin_karp(UpperCAmelCase , UpperCAmelCase ) print('Success.' ) if __name__ == "__main__": test_rabin_karp()
312
1
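The rolling-hash update used above, checked on a two-character window. A self-contained sketch with the same constants; modulus_power equals alphabet_size ** (p_len - 1) because the loop skips the final update:

alphabet_size, modulus = 256, 1000003

def poly_hash(s: str) -> int:
    value = 0
    for ch in s:
        value = (ord(ch) + value * alphabet_size) % modulus
    return value

power = alphabet_size % modulus  # alphabet_size ** (p_len - 1) for p_len == 2
rolled = ((poly_hash('ab') - ord('a') * power) * alphabet_size + ord('c')) % modulus
assert rolled == poly_hash('bc')  # sliding 'ab' -> 'bc' matches a fresh hash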
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: return not any( neighbour == 1 and colored_vertices[i] == color for i, neighbour in enumerate(UpperCAmelCase ) ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # Base Case if index == len(UpperCAmelCase ): return True # Recursive Step for i in range(UpperCAmelCase ): if valid_coloring(graph[index] , UpperCAmelCase , UpperCAmelCase ): # Color current vertex snake_case_ = i # Validate coloring if util_color(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , index + 1 ): return True # Backtrack snake_case_ = -1 return False def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[int]: snake_case_ = [-1] * len(UpperCAmelCase ) if util_color(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , 0 ): return colored_vertices return []
312
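A usage sketch for the m-coloring backtracker above (entry point referred to by its conventional name color; the dump collapses identifiers). The adjacency matrix below contains the triangle 1-2-3, so two colors fail while three succeed:

graph = [
    [0, 1, 0, 1],
    [1, 0, 1, 1],
    [0, 1, 0, 1],
    [1, 1, 1, 0],
]
# color(graph, 3) -> [0, 1, 0, 2]  (one valid assignment)
# color(graph, 2) -> []            (the triangle forces a third color)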
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "resnet" SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"] def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict: super().__init__(**lowerCAmelCase__) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}') snake_case_ = num_channels snake_case_ = embedding_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = layer_type snake_case_ = hidden_act snake_case_ = downsample_in_first_stage snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-3
312
1
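A minimal instantiation sketch for the config above; the depth values are illustrative, not recommended settings:

from transformers import ResNetConfig

config = ResNetConfig(depths=[2, 2, 2, 2], layer_type='basic')
assert config.stage_names == ['stem', 'stage1', 'stage2', 'stage3', 'stage4']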
"""simple docstring""" # NOTE: This file is deprecated and will be removed in a future version. # It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works from ...utils import deprecate from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401 from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401 deprecate( '''stable diffusion controlnet''', '''0.22.0''', '''Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.''', standard_warn=False, stacklevel=3, )
312
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> int: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>') tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = 'this is a test' snake_case_ = 'this is a test' return input_text, output_text def a_ ( self) -> Optional[int]: snake_case_ = '<pad>' snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], '<pad>') self.assertEqual(vocab_keys[1], '<unk>') self.assertEqual(vocab_keys[-1], '[PAD]') self.assertEqual(len(lowerCAmelCase__), 3_0001) def a_ ( self) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size, 3_0000) def a_ ( self) -> List[str]: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> str: pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> List[Any]: pass def a_ ( self) -> str: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> List[Any]: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Any: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = 'This is a test' snake_case_ = [13, 1, 4398, 25, 21, 1289] snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = DebertaVaTokenizer(lowerCAmelCase__) snake_case_ = tokenizer.encode('sequence builders') snake_case_ = tokenizer.encode('multi-sequence build') snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, ) @slow def a_ ( self) -> Union[str, Any]: # fmt: off snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
312
1
"""simple docstring""" from typing import Optional from .. import Features, NamedSplit from ..packaged_modules.text.text import Text from ..utils.typing import NestedDataStructureLike, PathLike from .abc import AbstractDatasetReader class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = False, lowerCAmelCase__ = False, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> List[str]: super().__init__( lowerCAmelCase__, split=lowerCAmelCase__, features=lowerCAmelCase__, cache_dir=lowerCAmelCase__, keep_in_memory=lowerCAmelCase__, streaming=lowerCAmelCase__, num_proc=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = path_or_paths if isinstance(lowerCAmelCase__, lowerCAmelCase__) else {self.split: path_or_paths} snake_case_ = Text( cache_dir=lowerCAmelCase__, data_files=lowerCAmelCase__, features=lowerCAmelCase__, **lowerCAmelCase__, ) def a_ ( self) -> List[str]: # Build iterable dataset if self.streaming: snake_case_ = self.builder.as_streaming_dataset(split=self.split) # Build regular (map-style) dataset else: snake_case_ = None snake_case_ = None snake_case_ = None snake_case_ = None self.builder.download_and_prepare( download_config=lowerCAmelCase__, download_mode=lowerCAmelCase__, verification_mode=lowerCAmelCase__, base_path=lowerCAmelCase__, num_proc=self.num_proc, ) snake_case_ = self.builder.as_dataset( split=self.split, verification_mode=lowerCAmelCase__, in_memory=self.keep_in_memory) return dataset
312
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {} __UpperCamelCase = {} __UpperCamelCase = {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]: snake_case_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' ) snake_case_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' ) snake_case_ = format_type def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]: snake_case_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): snake_case_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: __UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: __UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: __UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]: if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter: snake_case_ = get_format_type_from_alias(UpperCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
312
1
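A usage sketch for the registry above (helper referred to by its conventional name get_formatter; the dump collapses identifiers). An alias resolves to its canonical format type before lookup, and an unavailable backend raises the stored error:

import pyarrow as pa

fmt = get_formatter('np')        # alias 'np' resolves to 'numpy'
table = pa.table({'x': [1, 2, 3]})
batch = fmt.format_batch(table)  # {'x': array([1, 2, 3])}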
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''', # See all Nat models at https://huggingface.co/models?filter=nat } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "nat" SCREAMING_SNAKE_CASE_ = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, lowerCAmelCase__=4, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[3, 4, 6, 5], lowerCAmelCase__=[2, 4, 8, 16], lowerCAmelCase__=7, lowerCAmelCase__=3.0, lowerCAmelCase__=True, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.1, lowerCAmelCase__="gelu", lowerCAmelCase__=0.02, lowerCAmelCase__=1e-5, lowerCAmelCase__=0.0, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Optional[Any]: super().__init__(**lowerCAmelCase__) snake_case_ = patch_size snake_case_ = num_channels snake_case_ = embed_dim snake_case_ = depths snake_case_ = len(lowerCAmelCase__) snake_case_ = num_heads snake_case_ = kernel_size snake_case_ = mlp_ratio snake_case_ = qkv_bias snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = drop_path_rate snake_case_ = hidden_act snake_case_ = layer_norm_eps snake_case_ = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model snake_case_ = int(embed_dim * 2 ** (len(lowerCAmelCase__) - 1)) snake_case_ = layer_scale_init_value snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names)
312
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp __UpperCamelCase = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } __UpperCamelCase = { '''RUCAIBox/mvp''': 1024, } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = MvpTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]: super().__init__( lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type')) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**lowerCAmelCase__) snake_case_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case_ = 'post_processor' snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) if tokenizer_component_instance: snake_case_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case_ = tuple(state['sep']) if "cls" in state: snake_case_ = tuple(state['cls']) snake_case_ = False if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = add_prefix_space snake_case_ = True if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets: snake_case_ = trim_offsets snake_case_ = True if changes_to_apply: snake_case_ = getattr(lowerCAmelCase__, state.pop('type')) snake_case_ = component_class(**lowerCAmelCase__) setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) @property def a_ ( self) -> str: if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value snake_case_ = value def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str: snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
312
1
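The special-token layout produced by build_inputs_with_special_tokens above, written out on toy ids (the <s>=0 and </s>=2 ids assume the default BART-style vocab):

bos, eos = 0, 2        # <s>, </s> ids (assumed)
a, b = [31414], [232]  # toy token ids
single = [bos] + a + [eos]
pair = single + [eos] + b + [eos]
assert pair == [0, 31414, 2, 2, 232, 2]  # <s> A </s></s> B </s>; token_type_ids are all zeros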
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["pixel_values"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BILINEAR, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = size if size is not None else {'shortest_edge': 256} snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__) snake_case_ = crop_size if crop_size is not None else {'height': 224, 'width': 224} snake_case_ = get_size_dict(lowerCAmelCase__) snake_case_ = do_resize snake_case_ = size snake_case_ = resample snake_case_ = do_center_crop snake_case_ = crop_size snake_case_ = do_rescale snake_case_ = rescale_factor snake_case_ = do_normalize snake_case_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case_ = image_std if image_std is not None else IMAGENET_STANDARD_STD def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BICUBIC, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. 
Got {size.keys()}') snake_case_ = get_resize_output_image_size(lowerCAmelCase__, size=size['shortest_edge'], default_to_square=lowerCAmelCase__) return resize(lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: snake_case_ = get_size_dict(lowerCAmelCase__) return center_crop(lowerCAmelCase__, size=(size['height'], size['width']), data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray: return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> List[str]: snake_case_ = do_resize if do_resize is not None else self.do_resize snake_case_ = size if size is not None else self.size snake_case_ = get_size_dict(lowerCAmelCase__, default_to_square=lowerCAmelCase__) snake_case_ = resample if resample is not None else self.resample snake_case_ = do_center_crop if do_center_crop is not None else self.do_center_crop snake_case_ = crop_size if crop_size is not None else self.crop_size snake_case_ = get_size_dict(lowerCAmelCase__) snake_case_ = do_rescale if do_rescale is not None else self.do_rescale snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = image_mean if image_mean is not None else self.image_mean snake_case_ = image_std if image_std is not None else self.image_std snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_center_crop and crop_size is None: raise ValueError('Crop size must be specified if do_center_crop is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.') # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if do_resize: snake_case_ = [self.resize(image=lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__) for image in images] if do_center_crop: snake_case_ = [self.center_crop(image=lowerCAmelCase__, size=lowerCAmelCase__) for image in images] if do_rescale: snake_case_ = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__) for image in images] snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images] snake_case_ = {'pixel_values': images} return BatchFeature(data=lowerCAmelCase__, tensor_type=lowerCAmelCase__)
312
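The preprocessing order implemented above, sketched step by step on a random array. Shapes and values are illustrative; resize (shortest edge -> 256) and center crop (224 x 224) are stood in by their output shape:

import numpy as np

x = np.random.randint(0, 256, (3, 224, 224)).astype(np.float32)  # post-resize/crop
x = x * (1 / 255)                                  # rescale step
mean = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)  # IMAGENET_STANDARD_MEAN
std = np.array([0.5, 0.5, 0.5]).reshape(3, 1, 1)   # IMAGENET_STANDARD_STD
x = (x - mean) / std                               # normalize step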
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def a_ ( self, lowerCAmelCase__=0) -> List[Any]: snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__)) snake_case_ = np.random.RandomState(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def a_ ( self) -> Optional[Any]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def a_ ( self) -> List[str]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> str: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) # warmup pass to apply optimizations snake_case_ = pipe(**self.get_dummy_inputs()) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> int: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def 
a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): @property def a_ ( self) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a_ ( self) -> str: snake_case_ = ort.SessionOptions() snake_case_ = False return options def a_ ( self) -> Any: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) # using the PNDM scheduler by default snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def a_ ( self) -> List[Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) snake_case_ = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx') snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) 
snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
312
1
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase = 1000 ) -> int: snake_case_ = 2**power snake_case_ = 0 while n: snake_case_ , snake_case_ = r + n % 10, n // 10 return r if __name__ == "__main__": print(solution(int(str(input()).strip())))
312
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCamelCase = False __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''ybelkada/fonts''' def UpperCAmelCase ( ) -> Dict: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ' 'Pix2StructImageProcessor. Please upgrade torch.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: requires_backends(UpperCAmelCase , ['torch'] ) _check_torch_version() snake_case_ = image_tensor.unsqueeze(0 ) snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 ) snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image: requires_backends(UpperCAmelCase , 'vision' ) # Add new lines so that each line is no more than 80 characters. snake_case_ = textwrap.TextWrapper(width=80 ) snake_case_ = wrapper.wrap(text=UpperCAmelCase ) snake_case_ = '\n'.join(UpperCAmelCase ) if font_bytes is not None and font_path is None: snake_case_ = io.BytesIO(UpperCAmelCase ) elif font_path is not None: snake_case_ = font_path else: snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' ) snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase ) # Create the actual image with a bit of padding around the text. 
snake_case_ = text_width + left_padding + right_padding snake_case_ = text_height + top_padding + bottom_padding snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase ) snake_case_ = ImageDraw.Draw(UpperCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase ) return image def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(UpperCAmelCase , 'vision' ) # Convert to PIL image if necessary snake_case_ = to_pil_image(UpperCAmelCase ) snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase ) snake_case_ = max(header_image.width , image.width ) snake_case_ = int(image.height * (new_width / image.width) ) snake_case_ = int(header_image.height * (new_width / header_image.width) ) snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case_ = to_numpy_array(UpperCAmelCase ) if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST: snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) return new_image class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["flattened_patches"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16} snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = max_patches snake_case_ = is_vqa def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray: requires_backends(self.extract_flattened_patches, 'torch') _check_torch_version() # convert to torch snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST) snake_case_ = torch.from_numpy(lowerCAmelCase__) snake_case_ , snake_case_ = patch_size['height'], patch_size['width'] snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__) # maximize scale s.t. 
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1) snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1) snake_case_ = max(num_feasible_rows * patch_height, 1) snake_case_ = max(num_feasible_cols * patch_width, 1) snake_case_ = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = patches.shape snake_case_ = patches_shape[1] snake_case_ = patches_shape[2] snake_case_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case_ = patches.reshape([rows * columns, depth]) # [rows * columns, 1] snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1]) snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case_ = row_ids.to(torch.floataa) snake_case_ = col_ids.to(torch.floataa) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float() snake_case_ = to_numpy_array(lowerCAmelCase__) return result def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray: if image.dtype == np.uinta: snake_case_ = image.astype(np.floataa) # take mean across the whole `image` snake_case_ = np.mean(lowerCAmelCase__) snake_case_ = np.std(lowerCAmelCase__) snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape))) return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput: snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = patch_size if patch_size is not None else self.patch_size snake_case_ = max_patches if max_patches is not None else self.max_patches snake_case_ = self.is_vqa if kwargs.get('data_format', lowerCAmelCase__) is not None: raise ValueError('data_format is not an accepted input as the outputs are ') snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images] # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.') snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__) snake_case_ = kwargs.pop('font_path', lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [header_text] * len(lowerCAmelCase__) snake_case_ = [ render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__) for i, image in enumerate(lowerCAmelCase__) ] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images] # convert to torch tensor and permute snake_case_ = [ self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__) for image in images ] # create attention mask in numpy snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images] snake_case_ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__) return encoded_outputs
312
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from itertools import chain from typing import Optional, Union import datasets import numpy as np import torch from datasets import load_dataset import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, set_seed, ) from transformers.tokenization_utils_base import PreTrainedTokenizerBase from transformers.trainer_utils import get_last_checkpoint from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') __UpperCamelCase = logging.getLogger(__name__) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , ) SCREAMING_SNAKE_CASE_ = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field(default=lowerCAmelCase__ , metadata={"help": "The input training data file (a text file)."} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={ "help": ( "The maximum total input sequence length after tokenization. If passed, sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={ "help": ( "Whether to pad all samples to the maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) } , ) def a_ ( self) -> Optional[int]: if self.train_file is not None: snake_case_ = self.train_file.split('.')[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: snake_case_ = self.validation_file.split('.')[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = None SCREAMING_SNAKE_CASE_ = None def __call__( self, lowerCAmelCase__) -> str: snake_case_ = 'label' if 'label' in features[0].keys() else 'labels' snake_case_ = [feature.pop(lowerCAmelCase__) for feature in features] snake_case_ = len(lowerCAmelCase__) snake_case_ = len(features[0]['input_ids']) snake_case_ = [ [{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase__)] for feature in features ] snake_case_ = list(chain(*lowerCAmelCase__)) snake_case_ = self.tokenizer.pad( lowerCAmelCase__, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors='pt', ) # Un-flatten snake_case_ = {k: v.view(lowerCAmelCase__, lowerCAmelCase__, -1) for k, v in batch.items()} # Add back labels snake_case_ = torch.tensor(lowerCAmelCase__, dtype=torch.intaa) return batch def UpperCAmelCase ( ) -> Dict: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. snake_case_ , snake_case_ , snake_case_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_swag' , UpperCAmelCase , UpperCAmelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() snake_case_ = training_args.get_process_log_level() logger.setLevel(UpperCAmelCase ) datasets.utils.logging.set_verbosity(UpperCAmelCase ) transformers.utils.logging.set_verbosity(UpperCAmelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}' + f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' ) logger.info(f'Training/evaluation parameters {training_args}' ) # Detecting last checkpoint. 
snake_case_ = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: snake_case_ = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. ' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Set seed before initializing model. set_seed(training_args.seed ) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if data_args.train_file is not None or data_args.validation_file is not None: snake_case_ = {} if data_args.train_file is not None: snake_case_ = data_args.train_file if data_args.validation_file is not None: snake_case_ = data_args.validation_file snake_case_ = data_args.train_file.split('.' )[-1] snake_case_ = load_dataset( UpperCAmelCase , data_files=UpperCAmelCase , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) else: # Downloading and loading the swag dataset from the hub. snake_case_ = load_dataset( 'swag' , 'regular' , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Load pretrained model and tokenizer # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) snake_case_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) snake_case_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # When using your own dataset or a different dataset from swag, you will probably need to change this. 
snake_case_ = [f'ending{i}' for i in range(4 )] snake_case_ = 'sent1' snake_case_ = 'sent2' if data_args.max_seq_length is None: snake_case_ = tokenizer.model_max_length if max_seq_length > 1024: logger.warning( 'The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value' ' of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can' ' override this default with `--block_size xxx`.' ) snake_case_ = 1024 else: if data_args.max_seq_length > tokenizer.model_max_length: logger.warning( f'The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the' f'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.' ) snake_case_ = min(data_args.max_seq_length , tokenizer.model_max_length ) # Preprocessing the datasets. def preprocess_function(UpperCAmelCase ): snake_case_ = [[context] * 4 for context in examples[context_name]] snake_case_ = examples[question_header_name] snake_case_ = [ [f'{header} {examples[end][i]}' for end in ending_names] for i, header in enumerate(UpperCAmelCase ) ] # Flatten out snake_case_ = list(chain(*UpperCAmelCase ) ) snake_case_ = list(chain(*UpperCAmelCase ) ) # Tokenize snake_case_ = tokenizer( UpperCAmelCase , UpperCAmelCase , truncation=UpperCAmelCase , max_length=UpperCAmelCase , padding='max_length' if data_args.pad_to_max_length else False , ) # Un-flatten return {k: [v[i : i + 4] for i in range(0 , len(UpperCAmelCase ) , 4 )] for k, v in tokenized_examples.items()} if training_args.do_train: if "train" not in raw_datasets: raise ValueError('--do_train requires a train dataset' ) snake_case_ = raw_datasets['train'] if data_args.max_train_samples is not None: snake_case_ = min(len(UpperCAmelCase ) , data_args.max_train_samples ) snake_case_ = train_dataset.select(range(UpperCAmelCase ) ) with training_args.main_process_first(desc='train dataset map pre-processing' ): snake_case_ = train_dataset.map( UpperCAmelCase , batched=UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) if training_args.do_eval: if "validation" not in raw_datasets: raise ValueError('--do_eval requires a validation dataset' ) snake_case_ = raw_datasets['validation'] if data_args.max_eval_samples is not None: snake_case_ = min(len(UpperCAmelCase ) , data_args.max_eval_samples ) snake_case_ = eval_dataset.select(range(UpperCAmelCase ) ) with training_args.main_process_first(desc='validation dataset map pre-processing' ): snake_case_ = eval_dataset.map( UpperCAmelCase , batched=UpperCAmelCase , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , ) # Data collator snake_case_ = ( default_data_collator if data_args.pad_to_max_length else DataCollatorForMultipleChoice(tokenizer=UpperCAmelCase , pad_to_multiple_of=8 if training_args.fpaa else None ) ) # Metric def compute_metrics(UpperCAmelCase ): snake_case_ , snake_case_ = eval_predictions snake_case_ = np.argmax(UpperCAmelCase , axis=1 ) return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()} # Initialize our Trainer snake_case_ = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=UpperCAmelCase , data_collator=UpperCAmelCase , compute_metrics=UpperCAmelCase , ) # Training if training_args.do_train: snake_case_ = None if 
training_args.resume_from_checkpoint is not None: snake_case_ = training_args.resume_from_checkpoint elif last_checkpoint is not None: snake_case_ = last_checkpoint snake_case_ = trainer.train(resume_from_checkpoint=UpperCAmelCase ) trainer.save_model() # Saves the tokenizer too for easy upload snake_case_ = train_result.metrics snake_case_ = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(UpperCAmelCase ) ) snake_case_ = min(UpperCAmelCase , len(UpperCAmelCase ) ) trainer.log_metrics('train' , UpperCAmelCase ) trainer.save_metrics('train' , UpperCAmelCase ) trainer.save_state() # Evaluation if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ = trainer.evaluate() snake_case_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(UpperCAmelCase ) snake_case_ = min(UpperCAmelCase , len(UpperCAmelCase ) ) trainer.log_metrics('eval' , UpperCAmelCase ) trainer.save_metrics('eval' , UpperCAmelCase ) snake_case_ = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'multiple-choice', 'dataset_tags': 'swag', 'dataset_args': 'regular', 'dataset': 'SWAG', 'language': 'en', } if training_args.push_to_hub: trainer.push_to_hub(**UpperCAmelCase ) else: trainer.create_model_card(**UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Union[str, Any]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
312
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
312
1
"""simple docstring""" import argparse import logging import sys from unittest.mock import patch import run_glue_deebert from transformers.testing_utils import TestCasePlus, get_gpu_count, require_torch_non_multi_gpu, slow logging.basicConfig(level=logging.DEBUG) __UpperCamelCase = logging.getLogger() def UpperCAmelCase ( ) -> str: snake_case_ = argparse.ArgumentParser() parser.add_argument('-f' ) snake_case_ = parser.parse_args() return args.f class UpperCamelCase ( lowerCAmelCase__ ): def a_ ( self) -> None: snake_case_ = logging.StreamHandler(sys.stdout) logger.addHandler(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Tuple: snake_case_ = get_gpu_count() if n_gpu > 1: pass # XXX: doesn't quite work with n_gpu > 1 https://github.com/huggingface/transformers/issues/10560 # script = f"{self.examples_dir_str}/research_projects/deebert/run_glue_deebert.py" # distributed_args = f"-m torch.distributed.launch --nproc_per_node={n_gpu} {script}".split() # cmd = [sys.executable] + distributed_args + args # execute_subprocess_async(cmd, env=self.get_env()) # XXX: test the results - need to save them first into .json file else: args.insert(0, 'run_glue_deebert.py') with patch.object(lowerCAmelCase__, 'argv', lowerCAmelCase__): snake_case_ = run_glue_deebert.main() for value in result.values(): self.assertGreaterEqual(lowerCAmelCase__, 0.666) @slow @require_torch_non_multi_gpu def a_ ( self) -> Dict: snake_case_ = '\n --model_type roberta\n --model_name_or_path roberta-base\n --task_name MRPC\n --do_train\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --max_seq_length 128\n --per_gpu_eval_batch_size=1\n --per_gpu_train_batch_size=8\n --learning_rate 2e-4\n --num_train_epochs 3\n --overwrite_output_dir\n --seed 42\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --save_steps 0\n --overwrite_cache\n --eval_after_first_stage\n '.split() self.run_and_check(lowerCAmelCase__) snake_case_ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --eval_each_highway\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split() self.run_and_check(lowerCAmelCase__) snake_case_ = '\n --model_type roberta\n --model_name_or_path ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --task_name MRPC\n --do_eval\n --do_lower_case\n --data_dir ./tests/fixtures/tests_samples/MRPC/\n --output_dir ./examples/deebert/saved_models/roberta-base/MRPC/two_stage\n --plot_data_dir ./examples/deebert/results/\n --max_seq_length 128\n --early_exit_entropy 0.1\n --eval_highway\n --overwrite_cache\n --per_gpu_eval_batch_size=1\n '.split() self.run_and_check(lowerCAmelCase__)
312
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "trajectory_transformer" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]: snake_case_ = vocab_size snake_case_ = action_weight snake_case_ = reward_weight snake_case_ = value_weight snake_case_ = max_position_embeddings snake_case_ = block_size snake_case_ = action_dim snake_case_ = observation_dim snake_case_ = transition_dim snake_case_ = learning_rate snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_embd snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = resid_pdrop snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = kaiming_initializer_range snake_case_ = use_cache super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
312
1
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def UpperCAmelCase ( ) -> tuple[list[int], int]: snake_case_ = [randint(-1000 , 1000 ) for i in range(10 )] snake_case_ = randint(-5000 , 5000 ) return (arr, r) __UpperCamelCase = make_dataset() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> tuple[int, ...]: for triplet in permutations(UpperCAmelCase , 3 ): if sum(UpperCAmelCase ) == target: return tuple(sorted(UpperCAmelCase ) ) return (0, 0, 0) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> tuple[int, int, int]: arr.sort() snake_case_ = len(UpperCAmelCase ) for i in range(n - 1 ): snake_case_ , snake_case_ = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def UpperCAmelCase ( ) -> tuple[float, float]: snake_case_ = '\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n' snake_case_ = '\ntriplet_sum1(*dataset)\n' snake_case_ = '\ntriplet_sum2(*dataset)\n' snake_case_ = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10000 ) snake_case_ = repeat(setup=UpperCAmelCase , stmt=UpperCAmelCase , repeat=5 , number=10000 ) return (min(UpperCAmelCase ), min(UpperCAmelCase )) if __name__ == "__main__": from doctest import testmod testmod() __UpperCamelCase = solution_times() print(F"""The time for naive implementation is {times[0]}.""") print(F"""The time for optimized implementation is {times[1]}.""")
312
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["keras_nlp"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(self, ['keras_nlp'])
312
1
"""simple docstring""" from .imports import is_tqdm_available if is_tqdm_available(): from tqdm.auto import tqdm as _tqdm from ..state import PartialState def UpperCAmelCase ( UpperCAmelCase = True , *UpperCAmelCase , **UpperCAmelCase ) -> Dict: if not is_tqdm_available(): raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.' ) snake_case_ = False if main_process_only: snake_case_ = PartialState().local_process_index == 0 return _tqdm(*UpperCAmelCase , **UpperCAmelCase , disable=UpperCAmelCase )
312
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = os.path.dirname(UpperCAmelCase ) snake_case_ = os.path.basename(UpperCAmelCase ) snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCAmelCase ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
312
1
"""simple docstring""" import warnings from typing import List import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import is_flax_available, is_tf_available, is_torch_available class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["image_processor", "tokenizer"] SCREAMING_SNAKE_CASE_ = "OwlViTImageProcessor" SCREAMING_SNAKE_CASE_ = ("CLIPTokenizer", "CLIPTokenizerFast") def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> Any: snake_case_ = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.', lowerCAmelCase__, ) snake_case_ = kwargs.pop('feature_extractor') snake_case_ = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.') if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.') super().__init__(lowerCAmelCase__, lowerCAmelCase__) def __call__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="max_length", lowerCAmelCase__="np", **lowerCAmelCase__) -> str: if text is None and query_images is None and images is None: raise ValueError( 'You have to specify at least one text or query image or image. All three cannot be none.') if text is not None: if isinstance(lowerCAmelCase__, lowerCAmelCase__) or (isinstance(lowerCAmelCase__, lowerCAmelCase__) and not isinstance(text[0], lowerCAmelCase__)): snake_case_ = [self.tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__)] elif isinstance(lowerCAmelCase__, lowerCAmelCase__) and isinstance(text[0], lowerCAmelCase__): snake_case_ = [] # Maximum number of queries across batch snake_case_ = max([len(lowerCAmelCase__) for t in text]) # Pad all batch samples to max number of text queries for t in text: if len(lowerCAmelCase__) != max_num_queries: snake_case_ = t + [' '] * (max_num_queries - len(lowerCAmelCase__)) snake_case_ = self.tokenizer(lowerCAmelCase__, padding=lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) encodings.append(lowerCAmelCase__) else: raise TypeError('Input text should be a string, a list of strings or a nested list of strings') if return_tensors == "np": snake_case_ = np.concatenate([encoding['input_ids'] for encoding in encodings], axis=0) snake_case_ = np.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0) elif return_tensors == "jax" and is_flax_available(): import jax.numpy as jnp snake_case_ = jnp.concatenate([encoding['input_ids'] for encoding in encodings], axis=0) snake_case_ = jnp.concatenate([encoding['attention_mask'] for encoding in encodings], axis=0) elif return_tensors == "pt" and is_torch_available(): import torch snake_case_ = torch.cat([encoding['input_ids'] for encoding in encodings], dim=0) snake_case_ = torch.cat([encoding['attention_mask'] for encoding in encodings], dim=0) elif return_tensors == "tf" and is_tf_available(): import tensorflow as tf snake_case_ = tf.stack([encoding['input_ids'] for encoding in encodings], axis=0) snake_case_ = tf.stack([encoding['attention_mask'] for encoding in encodings], axis=0) else: raise ValueError('Target return tensor type could not be returned') snake_case_ = BatchEncoding() snake_case_ = input_ids snake_case_ = attention_mask if query_images is not None: snake_case_ = 
BatchEncoding() snake_case_ = self.image_processor( lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__).pixel_values snake_case_ = query_pixel_values if images is not None: snake_case_ = self.image_processor(lowerCAmelCase__, return_tensors=lowerCAmelCase__, **lowerCAmelCase__) if text is not None and images is not None: snake_case_ = image_features.pixel_values return encoding elif query_images is not None and images is not None: snake_case_ = image_features.pixel_values return encoding elif text is not None or query_images is not None: return encoding else: return BatchEncoding(data=dict(**lowerCAmelCase__), tensor_type=lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> List[str]: return self.image_processor.post_process(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Union[str, Any]: return self.image_processor.post_process_object_detection(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: return self.image_processor.post_process_image_guided_detection(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Optional[Any]: return self.tokenizer.batch_decode(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Union[str, Any]: return self.tokenizer.decode(*lowerCAmelCase__, **lowerCAmelCase__) @property def a_ ( self) -> Any: warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', lowerCAmelCase__, ) return self.image_processor_class @property def a_ ( self) -> List[str]: warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', lowerCAmelCase__, ) return self.image_processor
312
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __UpperCamelCase = '''▁''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = BarthezTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
312
1
"""simple docstring""" import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging __UpperCamelCase = logging.get_logger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = set() snake_case_ = [] def parse_line(UpperCAmelCase ): for line in fp: if isinstance(UpperCAmelCase , UpperCAmelCase ): snake_case_ = line.decode('UTF-8' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(' ' ): # process a single warning and move it to `selected_warnings`. if len(UpperCAmelCase ) > 0: snake_case_ = '\n'.join(UpperCAmelCase ) # Only keep the warnings specified in `targets` if any(f': {x}: ' in warning for x in targets ): selected_warnings.add(UpperCAmelCase ) buffer.clear() continue else: snake_case_ = line.strip() buffer.append(UpperCAmelCase ) if from_gh: for filename in os.listdir(UpperCAmelCase ): snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) if not os.path.isdir(UpperCAmelCase ): # read the file if filename != "warnings.txt": continue with open(UpperCAmelCase ) as fp: parse_line(UpperCAmelCase ) else: try: with zipfile.ZipFile(UpperCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(UpperCAmelCase ): # read the file if filename != "warnings.txt": continue with z.open(UpperCAmelCase ) as fp: parse_line(UpperCAmelCase ) except Exception: logger.warning( f'{artifact_path} is either an invalid zip file or something else wrong. This file is skipped.' ) return selected_warnings def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = set() snake_case_ = [os.path.join(UpperCAmelCase , UpperCAmelCase ) for p in os.listdir(UpperCAmelCase ) if (p.endswith('.zip' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(UpperCAmelCase , UpperCAmelCase ) ) return selected_warnings if __name__ == "__main__": def UpperCAmelCase ( UpperCAmelCase ) -> List[str]: return values.split(',' ) __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') # optional parameters parser.add_argument( '''--targets''', default='''DeprecationWarning,UserWarning,FutureWarning''', type=list_str, help='''Comma-separated list of target warning(s) which we want to extract.''', ) parser.add_argument( '''--from_gh''', action='''store_true''', help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''', ) __UpperCamelCase = parser.parse_args() __UpperCamelCase = args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links __UpperCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print('''=''' * 80) download_artifact(name, url, 
args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts __UpperCamelCase = extract_warnings(args.output_dir, args.targets) __UpperCamelCase = sorted(selected_warnings) with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
312
"""simple docstring""" import functools def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: # Validation if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(UpperCAmelCase ) == 0: return 0 if min(UpperCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(UpperCAmelCase ) >= 366: raise ValueError('All days elements should be less than 366' ) snake_case_ = set(UpperCAmelCase ) @functools.cache def dynamic_programming(UpperCAmelCase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
312
1
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __UpperCamelCase = logging.get_logger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: snake_case_ = WavaVecaForSequenceClassification.from_pretrained(UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = downstream_dict['projector.weight'] snake_case_ = downstream_dict['projector.bias'] snake_case_ = downstream_dict['model.post_net.linear.weight'] snake_case_ = downstream_dict['model.post_net.linear.bias'] return model def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: snake_case_ = WavaVecaForAudioFrameClassification.from_pretrained(UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = downstream_dict['model.linear.weight'] snake_case_ = downstream_dict['model.linear.bias'] return model def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: snake_case_ = WavaVecaForXVector.from_pretrained(UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = downstream_dict['connector.weight'] snake_case_ = downstream_dict['connector.bias'] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): snake_case_ = downstream_dict[ f'model.framelevel_feature_extractor.module.{i}.kernel.weight' ] snake_case_ = downstream_dict[f'model.framelevel_feature_extractor.module.{i}.kernel.bias'] snake_case_ = downstream_dict['model.utterancelevel_feature_extractor.linear1.weight'] snake_case_ = downstream_dict['model.utterancelevel_feature_extractor.linear1.bias'] snake_case_ = downstream_dict['model.utterancelevel_feature_extractor.linear2.weight'] snake_case_ = downstream_dict['model.utterancelevel_feature_extractor.linear2.bias'] snake_case_ = downstream_dict['objective.W'] return model @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Optional[int]: snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' ) snake_case_ = checkpoint['Downstream'] snake_case_ = WavaVecaConfig.from_pretrained(UpperCAmelCase ) snake_case_ = WavaVecaFeatureExtractor.from_pretrained( UpperCAmelCase , return_attention_mask=UpperCAmelCase , do_normalize=UpperCAmelCase ) snake_case_ = hf_config.architectures[0] if arch.endswith('ForSequenceClassification' ): snake_case_ = convert_classification(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) elif arch.endswith('ForAudioFrameClassification' ): snake_case_ = convert_diarization(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) elif arch.endswith('ForXVector' ): snake_case_ = convert_xvector(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) else: raise NotImplementedError(f'S3PRL weights conversion is not supported for {arch}' ) if hf_config.use_weighted_layer_sum: snake_case_ = checkpoint['Featurizer']['weights'] hf_feature_extractor.save_pretrained(UpperCAmelCase ) hf_model.save_pretrained(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--base_model_name''', default=None, type=str, help='''Name of the huggingface pretrained base model.''' ) parser.add_argument('''--config_path''', default=None, type=str, help='''Path to the huggingface classifier config.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to the s3prl checkpoint.''') 
parser.add_argument('''--model_dump_path''', default=None, type=str, help='''Path to the final converted model.''') __UpperCamelCase = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
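A rough follow-on sketch (the path is a placeholder, not from this file): once the converter has written --model_dump_path, the directory can be reloaded with the ordinary from_pretrained machinery.

# Assumes the script above was run with --model_dump_path ./converted (placeholder path).
from transformers import Wav2Vec2FeatureExtractor, Wav2Vec2ForSequenceClassification

feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained('./converted')
model = Wav2Vec2ForSequenceClassification.from_pretrained('./converted')
model.eval()  # switch off dropout for inference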
312
"""simple docstring""" import copy import re class UpperCamelCase : SCREAMING_SNAKE_CASE_ = "hp" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = None @classmethod def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = prefix snake_case_ = defaults cls.build_naming_info() @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: if len(lowerCAmelCase__) == 0: return "" snake_case_ = None if any(char.isdigit() for char in word): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1, len(lowerCAmelCase__) + 1): snake_case_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: snake_case_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCAmelCase__): snake_case_ = '' while integer != 0: snake_case_ = chr(ord('A') + integer % 10) + s integer //= 10 return s snake_case_ = 0 while True: snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__) if sword in info["reverse_short_word"]: continue else: snake_case_ = sword break snake_case_ = short_word snake_case_ = word return short_word @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = param_name.split('_') snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name snake_case_ = ['', '_'] for separator in separators: snake_case_ = separator.join(lowerCAmelCase__) if shortname not in info["reverse_short_param"]: snake_case_ = shortname snake_case_ = param_name return shortname return param_name @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = short_name snake_case_ = param_name @classmethod def a_ ( cls) -> List[str]: if cls.NAMING_INFO is not None: return snake_case_ = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } snake_case_ = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = info @classmethod def a_ ( cls, lowerCAmelCase__) -> List[Any]: cls.build_naming_info() assert cls.PREFIX is not None snake_case_ = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue snake_case_ = cls.NAMING_INFO['short_param'][k] if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = 1 if v else 0 snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-' snake_case_ = f'{key}{sep}{v}' name.append(lowerCAmelCase__) return "_".join(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__) -> Optional[Any]: snake_case_ = repr[len(cls.PREFIX) + 1 :] if repr == "": snake_case_ = [] else: snake_case_ = repr.split('_') snake_case_ = {} for value in values: if "-" in value: snake_case_ , snake_case_ = value.split('-') else: snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__) snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__)) snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k] snake_case_ = p_v for k in cls.DEFAULTS: if k not in parameters: snake_case_ = cls.DEFAULTS[k] return parameters
312
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["torch", "transformers", "onnx"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Optional[Any]: requires_backends(self, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> Union[str, Any]: requires_backends(cls, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> Optional[int]: requires_backends(cls, ['torch', 'transformers', 'onnx']) class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["torch", "transformers", "onnx"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(self, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> Optional[Any]: requires_backends(cls, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> List[str]: requires_backends(cls, ['torch', 'transformers', 'onnx']) class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["torch", "transformers", "onnx"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Tuple: requires_backends(self, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> str: requires_backends(cls, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(cls, ['torch', 'transformers', 'onnx']) class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["torch", "transformers", "onnx"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> List[str]: requires_backends(self, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> Union[str, Any]: requires_backends(cls, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> List[str]: requires_backends(cls, ['torch', 'transformers', 'onnx']) class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["torch", "transformers", "onnx"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Tuple: requires_backends(self, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(cls, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> Any: requires_backends(cls, ['torch', 'transformers', 'onnx']) class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["torch", "transformers", "onnx"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Optional[int]: requires_backends(self, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> List[str]: requires_backends(cls, ['torch', 'transformers', 'onnx']) @classmethod def a_ ( cls, *lowerCAmelCase__, **lowerCAmelCase__) -> Optional[int]: requires_backends(cls, ['torch', 'transformers', 'onnx'])
312
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = str(UpperCAmelCase ) dataset_info.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfo.from_directory(UpperCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) ) def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCAmelCase ) snake_case_ = yaml.safe_load(UpperCAmelCase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase ( ) -> Optional[Any]: snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = str(UpperCAmelCase ) dataset_infos_dict.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in 
dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
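A short round-trip sketch of the DatasetInfo API these tests exercise; the directory path is a placeholder and the datasets library is assumed to be installed.

import os

from datasets.features.features import Features, Value
from datasets.info import DatasetInfo

tmp_dir = '/tmp/dset_info_demo'  # placeholder path
os.makedirs(tmp_dir, exist_ok=True)

info = DatasetInfo(description='foo', features=Features({'a': Value('int32')}), dataset_size=42)
info.write_to_directory(tmp_dir)  # writes dataset_info.json
reloaded = DatasetInfo.from_directory(tmp_dir)
assert reloaded == info and reloaded.dataset_size == 42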
312
1
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase = 50 ) -> int: snake_case_ = [[0] * 3 for _ in range(length + 1 )] for row_length in range(length + 1 ): for tile_length in range(2 , 5 ): for tile_start in range(row_length - tile_length + 1 ): different_colour_ways_number[row_length][tile_length - 2] += ( different_colour_ways_number[row_length - tile_start - tile_length][ tile_length - 2 ] + 1 ) return sum(different_colour_ways_number[length] ) if __name__ == "__main__": print(F"""{solution() = }""")
312
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE_ = frozenset([] ) def a_ ( self) -> Any: torch.manual_seed(0) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, ) snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__) torch.manual_seed(0) snake_case_ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0) snake_case_ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, ) snake_case_ = CLIPTextModel(lowerCAmelCase__) snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') snake_case_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) snake_case_ = image.cpu().permute(0, 2, 3, 1)[0] snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64)) snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64)) if str(lowerCAmelCase__).startswith('mps'): snake_case_ = torch.manual_seed(lowerCAmelCase__) else: snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a_ ( self) -> Dict: snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = 
StableDiffusionInpaintPipeline(**lowerCAmelCase__) snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs(lowerCAmelCase__) snake_case_ = sd_pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self) -> Union[str, Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def a_ ( self) -> Optional[int]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def a_ ( self) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = 
'stabilityai/stable-diffusion-2-inpainting' snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler') snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', ) snake_case_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
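For orientation, a minimal end-to-end sketch of the pipeline these tests exercise. It assumes a CUDA GPU and network access; the checkpoint and image URLs are the ones referenced in the tests above, and the output filename is a placeholder.

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

init_image = load_image(
    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/init_image.png'
)
mask_image = load_image(
    'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png'
)

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    'stabilityai/stable-diffusion-2-inpainting', torch_dtype=torch.float16
)
pipe = pipe.to('cuda')

# White mask pixels are repainted according to the prompt.
result = pipe(
    prompt='Face of a yellow cat, high resolution, sitting on a park bench',
    image=init_image,
    mask_image=mask_image,
)
result.images[0].save('inpainted.png')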
312
1
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class UpperCamelCase : def __init__( self) -> Dict: snake_case_ = {} def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=1) -> Any: if self.graph.get(lowerCAmelCase__): if self.graph[u].count([w, v]) == 0: self.graph[u].append([w, v]) else: snake_case_ = [[w, v]] if not self.graph.get(lowerCAmelCase__): snake_case_ = [] def a_ ( self) -> Any: return list(self.graph) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Any: if self.graph.get(lowerCAmelCase__): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__=-2, lowerCAmelCase__=-1) -> Optional[Any]: if s == d: return [] snake_case_ = [] snake_case_ = [] if s == -2: snake_case_ = list(self.graph)[0] stack.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) snake_case_ = s while True: # check if there is any non isolated nodes if len(self.graph[s]) != 0: snake_case_ = s for node in self.graph[s]: if visited.count(node[1]) < 1: if node[1] == d: visited.append(lowerCAmelCase__) return visited else: stack.append(node[1]) visited.append(node[1]) snake_case_ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCAmelCase__) != 0: snake_case_ = stack[len(lowerCAmelCase__) - 1] else: snake_case_ = ss # check if se have reached the starting point if len(lowerCAmelCase__) == 0: return visited def a_ ( self, lowerCAmelCase__=-1) -> Tuple: if c == -1: snake_case_ = floor(random() * 1_0000) + 10 for i in range(lowerCAmelCase__): # every vertex has max 100 edges for _ in range(floor(random() * 102) + 1): snake_case_ = floor(random() * c) + 1 if n != i: self.add_pair(lowerCAmelCase__, lowerCAmelCase__, 1) def a_ ( self, lowerCAmelCase__=-2) -> Tuple: snake_case_ = deque() snake_case_ = [] if s == -2: snake_case_ = list(self.graph)[0] d.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) while d: snake_case_ = d.popleft() if len(self.graph[s]) != 0: for node in self.graph[s]: if visited.count(node[1]) < 1: d.append(node[1]) visited.append(node[1]) return visited def a_ ( self, lowerCAmelCase__) -> Tuple: snake_case_ = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def a_ ( self, lowerCAmelCase__) -> Dict: return len(self.graph[u]) def a_ ( self, lowerCAmelCase__=-2) -> Any: snake_case_ = [] snake_case_ = [] if s == -2: snake_case_ = list(self.graph)[0] stack.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) snake_case_ = s snake_case_ = [] while True: # check if there is any non isolated nodes if len(self.graph[s]) != 0: snake_case_ = s for node in self.graph[s]: if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) snake_case_ = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop()) if len(lowerCAmelCase__) != 0: snake_case_ = stack[len(lowerCAmelCase__) - 1] else: snake_case_ = ss # check if se have reached the starting point if len(lowerCAmelCase__) == 0: return sorted_nodes def a_ ( self) -> List[Any]: snake_case_ = [] snake_case_ = [] snake_case_ = list(self.graph)[0] stack.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) snake_case_ = -2 snake_case_ = [] snake_case_ = s snake_case_ = False snake_case_ = set() while True: # check if there is any non isolated nodes if len(self.graph[s]) != 0: snake_case_ = s for node in self.graph[s]: if ( visited.count(node[1]) > 0 and node[1] != 
parent and indirect_parents.count(node[1]) > 0 and not on_the_way_back ): snake_case_ = len(lowerCAmelCase__) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1]) break else: anticipating_nodes.add(stack[len_stack]) len_stack -= 1 if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) snake_case_ = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ = True if len(lowerCAmelCase__) != 0: snake_case_ = stack[len(lowerCAmelCase__) - 1] else: snake_case_ = False indirect_parents.append(lowerCAmelCase__) snake_case_ = s snake_case_ = ss # check if se have reached the starting point if len(lowerCAmelCase__) == 0: return list(lowerCAmelCase__) def a_ ( self) -> str: snake_case_ = [] snake_case_ = [] snake_case_ = list(self.graph)[0] stack.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) snake_case_ = -2 snake_case_ = [] snake_case_ = s snake_case_ = False snake_case_ = set() while True: # check if there is any non isolated nodes if len(self.graph[s]) != 0: snake_case_ = s for node in self.graph[s]: if ( visited.count(node[1]) > 0 and node[1] != parent and indirect_parents.count(node[1]) > 0 and not on_the_way_back ): snake_case_ = len(lowerCAmelCase__) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1]) break else: return True if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) snake_case_ = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ = True if len(lowerCAmelCase__) != 0: snake_case_ = stack[len(lowerCAmelCase__) - 1] else: snake_case_ = False indirect_parents.append(lowerCAmelCase__) snake_case_ = s snake_case_ = ss # check if se have reached the starting point if len(lowerCAmelCase__) == 0: return False def a_ ( self, lowerCAmelCase__=-2, lowerCAmelCase__=-1) -> int: snake_case_ = time() self.dfs(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = time() return end - begin def a_ ( self, lowerCAmelCase__=-2) -> Tuple: snake_case_ = time() self.bfs(lowerCAmelCase__) snake_case_ = time() return end - begin class UpperCamelCase : def __init__( self) -> str: snake_case_ = {} def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=1) -> Union[str, Any]: # check if the u exists if self.graph.get(lowerCAmelCase__): # if there already is a edge if self.graph[u].count([w, v]) == 0: self.graph[u].append([w, v]) else: # if u does not exist snake_case_ = [[w, v]] # add the other way if self.graph.get(lowerCAmelCase__): # if there already is a edge if self.graph[v].count([w, u]) == 0: self.graph[v].append([w, u]) else: # if u does not exist snake_case_ = [[w, u]] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> str: if self.graph.get(lowerCAmelCase__): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(lowerCAmelCase__) # the other way round if self.graph.get(lowerCAmelCase__): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__=-2, lowerCAmelCase__=-1) -> Optional[Any]: if s == d: return [] snake_case_ = [] snake_case_ = [] if s == -2: snake_case_ = list(self.graph)[0] stack.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) snake_case_ = s while True: # check if there is any non isolated nodes if len(self.graph[s]) != 0: snake_case_ = s for node in self.graph[s]: if visited.count(node[1]) < 1: if node[1] == d: visited.append(lowerCAmelCase__) return visited else: stack.append(node[1]) 
visited.append(node[1]) snake_case_ = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(lowerCAmelCase__) != 0: snake_case_ = stack[len(lowerCAmelCase__) - 1] else: snake_case_ = ss # check if se have reached the starting point if len(lowerCAmelCase__) == 0: return visited def a_ ( self, lowerCAmelCase__=-1) -> Optional[Any]: if c == -1: snake_case_ = floor(random() * 1_0000) + 10 for i in range(lowerCAmelCase__): # every vertex has max 100 edges for _ in range(floor(random() * 102) + 1): snake_case_ = floor(random() * c) + 1 if n != i: self.add_pair(lowerCAmelCase__, lowerCAmelCase__, 1) def a_ ( self, lowerCAmelCase__=-2) -> Dict: snake_case_ = deque() snake_case_ = [] if s == -2: snake_case_ = list(self.graph)[0] d.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) while d: snake_case_ = d.popleft() if len(self.graph[s]) != 0: for node in self.graph[s]: if visited.count(node[1]) < 1: d.append(node[1]) visited.append(node[1]) return visited def a_ ( self, lowerCAmelCase__) -> Tuple: return len(self.graph[u]) def a_ ( self) -> Tuple: snake_case_ = [] snake_case_ = [] snake_case_ = list(self.graph)[0] stack.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) snake_case_ = -2 snake_case_ = [] snake_case_ = s snake_case_ = False snake_case_ = set() while True: # check if there is any non isolated nodes if len(self.graph[s]) != 0: snake_case_ = s for node in self.graph[s]: if ( visited.count(node[1]) > 0 and node[1] != parent and indirect_parents.count(node[1]) > 0 and not on_the_way_back ): snake_case_ = len(lowerCAmelCase__) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1]) break else: anticipating_nodes.add(stack[len_stack]) len_stack -= 1 if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) snake_case_ = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ = True if len(lowerCAmelCase__) != 0: snake_case_ = stack[len(lowerCAmelCase__) - 1] else: snake_case_ = False indirect_parents.append(lowerCAmelCase__) snake_case_ = s snake_case_ = ss # check if se have reached the starting point if len(lowerCAmelCase__) == 0: return list(lowerCAmelCase__) def a_ ( self) -> str: snake_case_ = [] snake_case_ = [] snake_case_ = list(self.graph)[0] stack.append(lowerCAmelCase__) visited.append(lowerCAmelCase__) snake_case_ = -2 snake_case_ = [] snake_case_ = s snake_case_ = False snake_case_ = set() while True: # check if there is any non isolated nodes if len(self.graph[s]) != 0: snake_case_ = s for node in self.graph[s]: if ( visited.count(node[1]) > 0 and node[1] != parent and indirect_parents.count(node[1]) > 0 and not on_the_way_back ): snake_case_ = len(lowerCAmelCase__) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1]) break else: return True if visited.count(node[1]) < 1: stack.append(node[1]) visited.append(node[1]) snake_case_ = node[1] break # check if all the children are visited if s == ss: stack.pop() snake_case_ = True if len(lowerCAmelCase__) != 0: snake_case_ = stack[len(lowerCAmelCase__) - 1] else: snake_case_ = False indirect_parents.append(lowerCAmelCase__) snake_case_ = s snake_case_ = ss # check if se have reached the starting point if len(lowerCAmelCase__) == 0: return False def a_ ( self) -> int: return list(self.graph) def a_ ( self, lowerCAmelCase__=-2, lowerCAmelCase__=-1) -> int: snake_case_ = time() self.dfs(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = time() return 
end - begin def a_ ( self, lowerCAmelCase__=-2) -> str: snake_case_ = time() self.bfs(lowerCAmelCase__) snake_case_ = time() return end - begin
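The traversals above are iterative, driven by an explicit stack (DFS) or deque (BFS) rather than recursion. A compact standalone rendering of the same pattern on a plain adjacency dict; MiniGraph is our own illustration, not the class above.

from collections import deque


class MiniGraph:
    def __init__(self):
        self.adj = {}

    def add_edge(self, u, v):
        # Directed edge u -> v; ensure both vertices exist in the map.
        self.adj.setdefault(u, []).append(v)
        self.adj.setdefault(v, [])

    def dfs(self, start):
        visited, stack, order = set(), [start], []
        while stack:
            node = stack.pop()
            if node in visited:
                continue
            visited.add(node)
            order.append(node)
            # Push neighbours; reversed() keeps left-to-right visit order.
            stack.extend(reversed(self.adj[node]))
        return order

    def bfs(self, start):
        visited, queue, order = {start}, deque([start]), []
        while queue:
            node = queue.popleft()
            order.append(node)
            for nxt in self.adj[node]:
                if nxt not in visited:
                    visited.add(nxt)
                    queue.append(nxt)
        return order


g = MiniGraph()
for u, v in [(1, 2), (1, 3), (2, 4), (3, 4)]:
    g.add_edge(u, v)
assert g.dfs(1) == [1, 2, 4, 3]
assert g.bfs(1) == [1, 2, 3, 4]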
312
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: return (preds == labels).mean() @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} ) SCREAMING_SNAKE_CASE_ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Set seed set_seed(training_args.seed ) try: snake_case_ = processors[data_args.task_name]() snake_case_ = processor.get_labels() snake_case_ = len(UpperCAmelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase ) -> Dict: snake_case_ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )} # Data collator snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case_ = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ = trainer.evaluate() snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase , 'w' ) as 
writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(UpperCAmelCase ) return results def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
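The simple_accuracy metric at the top of this script is just mean agreement between argmaxed logits and the labels; a tiny self-contained check with illustrative values:

import numpy as np

logits = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])  # illustrative predictions
labels = np.array([1, 0, 0])

preds = np.argmax(logits, axis=1)    # -> [1, 0, 1]
accuracy = (preds == labels).mean()  # 2 of 3 correct
assert np.isclose(accuracy, 2 / 3)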
312
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''yjernite/retribert-base-uncased''': 512, } __UpperCamelCase = { '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True}, } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ = RetriBertTokenizer SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=True, lowerCAmelCase__="[UNK]", lowerCAmelCase__="[SEP]", lowerCAmelCase__="[PAD]", lowerCAmelCase__="[CLS]", lowerCAmelCase__="[MASK]", lowerCAmelCase__=True, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Union[str, Any]: super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, do_lower_case=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, tokenize_chinese_chars=lowerCAmelCase__, strip_accents=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get('lowercase', lowerCAmelCase__) != do_lower_case or normalizer_state.get('strip_accents', lowerCAmelCase__) != strip_accents or normalizer_state.get('handle_chinese_chars', lowerCAmelCase__) != tokenize_chinese_chars ): snake_case_ = getattr(lowerCAmelCase__, normalizer_state.pop('type')) snake_case_ = do_lower_case snake_case_ = strip_accents snake_case_ = tokenize_chinese_chars snake_case_ = normalizer_class(**lowerCAmelCase__) snake_case_ = do_lower_case def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> Dict: snake_case_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__)
312
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
312
1
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
312
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict: snake_case_ = [] for old_item in old_list: snake_case_ = old_item.replace('in_layers.0' , 'norm1' ) snake_case_ = new_item.replace('in_layers.2' , 'conv1' ) snake_case_ = new_item.replace('out_layers.0' , 'norm2' ) snake_case_ = new_item.replace('out_layers.3' , 'conv2' ) snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' ) snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]: snake_case_ = [] for old_item in old_list: snake_case_ = old_item snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' ) snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' ) snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]: assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): snake_case_ = old_checkpoint[path] snake_case_ = old_tensor.shape[0] // 3 snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3 snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 ) snake_case_ = query.reshape(UpperCAmelCase ) snake_case_ = key.reshape(UpperCAmelCase ) snake_case_ = value.reshape(UpperCAmelCase ) for path in paths: snake_case_ = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: snake_case_ = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: snake_case_ = old_checkpoint[path['old']][:, :, 0] else: snake_case_ = old_checkpoint[path['old']] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = {} snake_case_ = checkpoint['time_embed.0.weight'] snake_case_ = checkpoint['time_embed.0.bias'] snake_case_ = checkpoint['time_embed.2.weight'] snake_case_ = checkpoint['time_embed.2.bias'] snake_case_ = checkpoint['input_blocks.0.0.weight'] snake_case_ = checkpoint['input_blocks.0.0.bias'] snake_case_ = checkpoint['out.0.weight'] snake_case_ = checkpoint['out.0.bias'] snake_case_ = checkpoint['out.2.weight'] snake_case_ = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the middle blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the output blocks only snake_case_ = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } for i in range(1 , UpperCAmelCase ): snake_case_ = (i - 1) // (config['num_res_blocks'] + 1) snake_case_ = (i - 1) % (config['num_res_blocks'] + 1) snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.weight' ] snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase ) if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , ) snake_case_ = middle_blocks[0] snake_case_ = middle_blocks[1] snake_case_ = middle_blocks[2] snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase ) for i in range(UpperCAmelCase ): snake_case_ = i // (config['num_res_blocks'] + 1) snake_case_ = i % (config['num_res_blocks'] + 1) snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]] snake_case_ = {} for layer in output_block_layers: snake_case_ , snake_case_ = layer.split('.' 
)[0], shave_segments(UpperCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCAmelCase ) else: snake_case_ = [layer_name] if len(UpperCAmelCase ) > 1: snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(UpperCAmelCase ) == 2: snake_case_ = [] if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , ) else: snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] ) snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] ) snake_case_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: __UpperCamelCase = json.loads(f.read()) __UpperCamelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __UpperCamelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: 
E722 model.save_pretrained(args.dump_path)
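The subtlest step in assign_to_checkpoint above is splitting a fused qkv projection into separate query/key/value matrices. A self-contained toy version of that reshape-and-split; the head count and channel sizes are illustrative, not taken from a real checkpoint.

import torch

num_heads, channels = 2, 8                 # illustrative sizes
qkv = torch.randn(3 * channels, channels)  # fused projection, as stored in the old checkpoint

# Group by head, split the fused axis into q/k/v, then flatten back per matrix.
grouped = qkv.reshape(num_heads, 3 * channels // num_heads, channels)
q, k, v = grouped.split(channels // num_heads, dim=1)
q, k, v = (t.reshape(channels, channels) for t in (q, k, v))

assert q.shape == k.shape == v.shape == (channels, channels)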
312
1
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: return (preds == labels).mean() @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} ) SCREAMING_SNAKE_CASE_ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Set seed set_seed(training_args.seed ) try: snake_case_ = processors[data_args.task_name]() snake_case_ = processor.get_labels() snake_case_ = len(UpperCAmelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase ) -> Dict: snake_case_ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )} # Data collator snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case_ = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ = trainer.evaluate() snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase , 'w' ) as 
writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(UpperCAmelCase ) return results def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
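The metric function at the top of this script reduces to an element-wise comparison followed by a mean. A minimal, self-contained sketch of the same computation (the array values here are illustrative, not from the script):

import numpy as np

# Five hypothetical predictions against gold labels.
preds = np.array([0, 2, 1, 1, 3])
labels = np.array([0, 2, 0, 1, 3])

# (preds == labels) is a boolean vector; its mean is the fraction of exact matches.
accuracy = (preds == labels).mean()
print(accuracy)  # 0.8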
312
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def UpperCAmelCase ( UpperCAmelCase ) -> Dict: # vision encoder if "img_encoder.pos_embed" in name: snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: snake_case_ = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: snake_case_ = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: snake_case_ = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: snake_case_ = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: snake_case_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: snake_case_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: snake_case_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: snake_case_ = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: snake_case_ = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: snake_case_ = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(UpperCAmelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' 
) snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] ) snake_case_ = config.vision_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ = int(key_split[3] ) snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = rename_key(UpperCAmelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ = val.squeeze_() else: snake_case_ = val return orig_state_dict def UpperCAmelCase ( ) -> Any: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int: snake_case_ = GroupViTConfig() snake_case_ = GroupViTModel(UpperCAmelCase ).eval() snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model'] snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0) # verify result snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) snake_case_ = prepare_img() snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase ) if model_name == "groupvit-gcc-yfcc": snake_case_ = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(f'Model name {model_name} not supported.' ) assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 ) processor.save_pretrained(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) print('Successfully saved processor and model to' , UpperCAmelCase ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(UpperCAmelCase , organization='nielsr' ) model.push_to_hub(UpperCAmelCase , organization='nielsr' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gcc-yfcc''', type=str, help='''Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) __UpperCamelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
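The qkv-handling branch in convert_state_dict above slices a fused projection into equal query/key/value blocks along the first axis. A minimal sketch of that slicing, assuming a fused weight of shape (3 * dim, dim) with rows ordered query, key, value:

import torch

dim = 4  # stands in for config.vision_config.hidden_size
fused_weight = torch.randn(3 * dim, dim)

q_weight = fused_weight[:dim, :]           # first dim rows -> query
k_weight = fused_weight[dim : dim * 2, :]  # middle dim rows -> key
v_weight = fused_weight[-dim:, :]          # last dim rows -> value

assert q_weight.shape == k_weight.shape == v_weight.shape == (dim, dim)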
312
1
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''Salesforce/codegen-350M-nl''': '''https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json''', '''Salesforce/codegen-350M-multi''': '''https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json''', '''Salesforce/codegen-350M-mono''': '''https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json''', '''Salesforce/codegen-2B-nl''': '''https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json''', '''Salesforce/codegen-2B-multi''': '''https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json''', '''Salesforce/codegen-2B-mono''': '''https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json''', '''Salesforce/codegen-6B-nl''': '''https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json''', '''Salesforce/codegen-6B-multi''': '''https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json''', '''Salesforce/codegen-6B-mono''': '''https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json''', '''Salesforce/codegen-16B-nl''': '''https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json''', '''Salesforce/codegen-16B-multi''': '''https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json''', '''Salesforce/codegen-16B-mono''': '''https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "codegen" SCREAMING_SNAKE_CASE_ = { "max_position_embeddings": "n_positions", "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=5_0400, lowerCAmelCase__=2048, lowerCAmelCase__=2048, lowerCAmelCase__=4096, lowerCAmelCase__=28, lowerCAmelCase__=16, lowerCAmelCase__=64, lowerCAmelCase__=None, lowerCAmelCase__="gelu_new", lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=1e-5, lowerCAmelCase__=0.02, lowerCAmelCase__=True, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, lowerCAmelCase__=False, **lowerCAmelCase__, ) -> Optional[Any]: snake_case_ = vocab_size snake_case_ = n_ctx snake_case_ = n_positions snake_case_ = n_embd snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_inner snake_case_ = rotary_dim snake_case_ = activation_function snake_case_ = resid_pdrop snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = layer_norm_epsilon snake_case_ = initializer_range snake_case_ = use_cache snake_case_ = bos_token_id snake_case_ = eos_token_id super().__init__( bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, tie_word_embeddings=lowerCAmelCase__, **lowerCAmelCase__) class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, lowerCAmelCase__, lowerCAmelCase__ = "default", lowerCAmelCase__ = None, lowerCAmelCase__ = False, ) -> Tuple: super().__init__(lowerCAmelCase__, task=lowerCAmelCase__, patching_specs=lowerCAmelCase__, use_past=lowerCAmelCase__) if not getattr(self._config, 'pad_token_id', lowerCAmelCase__): # TODO: how to do that better? 
snake_case_ = 0 @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: snake_case_ = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}}) if self.use_past: self.fill_with_past_key_values_(lowerCAmelCase__, direction='inputs') snake_case_ = {0: 'batch', 1: 'past_sequence + sequence'} else: snake_case_ = {0: 'batch', 1: 'sequence'} return common_inputs @property def a_ ( self) -> int: return self._config.n_layer @property def a_ ( self) -> int: return self._config.n_head def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = -1, lowerCAmelCase__ = -1, lowerCAmelCase__ = False, lowerCAmelCase__ = None, ) -> Mapping[str, Any]: snake_case_ = super(lowerCAmelCase__, self).generate_dummy_inputs( lowerCAmelCase__, batch_size=lowerCAmelCase__, seq_length=lowerCAmelCase__, is_pair=lowerCAmelCase__, framework=lowerCAmelCase__) # We need to order the input in the way they appears in the forward() snake_case_ = OrderedDict({'input_ids': common_inputs['input_ids']}) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.') else: import torch snake_case_ , snake_case_ = common_inputs['input_ids'].shape # Not using the same length for past_key_values snake_case_ = seqlen + 2 snake_case_ = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) snake_case_ = [ (torch.zeros(lowerCAmelCase__), torch.zeros(lowerCAmelCase__)) for _ in range(self.num_layers) ] snake_case_ = common_inputs['attention_mask'] if self.use_past: snake_case_ = ordered_inputs['attention_mask'].dtype snake_case_ = torch.cat( [ordered_inputs['attention_mask'], torch.ones(lowerCAmelCase__, lowerCAmelCase__, dtype=lowerCAmelCase__)], dim=1) return ordered_inputs @property def a_ ( self) -> int: return 13
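generate_dummy_inputs above zero-fills one (key, value) pair per layer with shape (batch, num_heads, past_length, head_dim), where past_length is the dummy sequence length plus two. A short sketch of that shape arithmetic with toy sizes (the variable names mirror, but are not, the config fields):

import torch

batch, n_head, n_embd, n_layer = 2, 16, 4096, 28  # toy stand-ins for the config values
seqlen = 8
past_key_values_length = seqlen + 2  # the config deliberately uses a different length for the past

past_shape = (batch, n_head, past_key_values_length, n_embd // n_head)
past_key_values = [(torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(n_layer)]
print(len(past_key_values), past_key_values[0][0].shape)  # 28 torch.Size([2, 16, 10, 256])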
312
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
1
"""simple docstring""" import pytest from datasets.parallel import ParallelBackendConfig, parallel_backend from datasets.utils.py_utils import map_nested from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows def UpperCAmelCase ( UpperCAmelCase ) -> Dict: # picklable for multiprocessing return i + 1 @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows def UpperCAmelCase ( ) -> List[str]: with parallel_backend('spark' ): assert ParallelBackendConfig.backend_name == "spark" snake_case_ = [1, 2, 3] with pytest.raises(UpperCAmelCase ): with parallel_backend('unsupported backend' ): map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=2 ) with pytest.raises(UpperCAmelCase ): with parallel_backend('unsupported backend' ): map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=-1 ) @require_dill_gt_0_3_2 @require_joblibspark @require_not_windows @pytest.mark.parametrize('num_proc' , [2, -1] ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = [1, 2] snake_case_ = {'a': 1, 'b': 2} snake_case_ = {'a': [1, 2], 'b': [3, 4]} snake_case_ = {'a': {'1': 1}, 'b': 2} snake_case_ = {'a': 1, 'b': 2, 'c': 3, 'd': 4} snake_case_ = [2, 3] snake_case_ = {'a': 2, 'b': 3} snake_case_ = {'a': [2, 3], 'b': [4, 5]} snake_case_ = {'a': {'1': 2}, 'b': 3} snake_case_ = {'a': 2, 'b': 3, 'c': 4, 'd': 5} with parallel_backend('spark' ): assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa assert map_nested(UpperCAmelCase , UpperCAmelCase , num_proc=UpperCAmelCase ) == expected_map_nested_sa
312
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]: if partitions <= 0: raise ValueError('partitions must be a positive number!' ) if partitions > number_of_bytes: raise ValueError('partitions can not > number_of_bytes!' ) snake_case_ = number_of_bytes // partitions snake_case_ = [] for i in range(UpperCAmelCase ): snake_case_ = i * bytes_per_partition + 1 snake_case_ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
312
1
"""simple docstring""" import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, lowerCAmelCase__, lowerCAmelCase__=13, lowerCAmelCase__=7, lowerCAmelCase__=True, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=True, lowerCAmelCase__=99, lowerCAmelCase__=32, lowerCAmelCase__=5, lowerCAmelCase__=4, lowerCAmelCase__=64, lowerCAmelCase__="gelu", lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=512, lowerCAmelCase__=16, lowerCAmelCase__=2, lowerCAmelCase__=0.02, lowerCAmelCase__=3, lowerCAmelCase__=4, lowerCAmelCase__=None, lowerCAmelCase__=2, lowerCAmelCase__=2, lowerCAmelCase__=2, lowerCAmelCase__=2, lowerCAmelCase__=4, lowerCAmelCase__=1, ) -> Any: snake_case_ = parent snake_case_ = batch_size snake_case_ = seq_length snake_case_ = is_training snake_case_ = use_input_mask snake_case_ = use_token_type_ids snake_case_ = use_labels snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = type_sequence_label_size snake_case_ = initializer_range snake_case_ = num_labels snake_case_ = num_choices snake_case_ = scope snake_case_ = q_groups snake_case_ = k_groups snake_case_ = v_groups snake_case_ = post_attention_groups snake_case_ = intermediate_groups snake_case_ = output_groups def a_ ( self) -> Optional[Any]: snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) snake_case_ = None if self.use_input_mask: snake_case_ = random_attention_mask([self.batch_size, self.seq_length]) snake_case_ = None snake_case_ = None snake_case_ = None if self.use_labels: snake_case_ = ids_tensor([self.batch_size], self.type_sequence_label_size) snake_case_ = ids_tensor([self.batch_size, self.seq_length], self.num_labels) snake_case_ = ids_tensor([self.batch_size], self.num_choices) snake_case_ = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def a_ ( self) -> Optional[Any]: return SqueezeBertConfig( embedding_size=self.hidden_size, vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, attention_probs_dropout_prob=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, q_groups=self.q_groups, k_groups=self.k_groups, v_groups=self.v_groups, post_attention_groups=self.post_attention_groups, intermediate_groups=self.intermediate_groups, output_groups=self.output_groups, ) def a_ ( self, 
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = SqueezeBertModel(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = model(lowerCAmelCase__) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = SqueezeBertForMaskedLM(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Any: snake_case_ = SqueezeBertForQuestionAnswering(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model( lowerCAmelCase__, attention_mask=lowerCAmelCase__, start_positions=lowerCAmelCase__, end_positions=lowerCAmelCase__) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> int: snake_case_ = self.num_labels snake_case_ = SqueezeBertForSequenceClassification(lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = self.num_labels snake_case_ = SqueezeBertForTokenClassification(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = model(lowerCAmelCase__, attention_mask=lowerCAmelCase__, labels=lowerCAmelCase__) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> List[str]: snake_case_ = self.num_choices snake_case_ = SqueezeBertForMultipleChoice(config=lowerCAmelCase__) model.to(lowerCAmelCase__) model.eval() snake_case_ = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() snake_case_ = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() snake_case_ = model( lowerCAmelCase__, attention_mask=lowerCAmelCase__, labels=lowerCAmelCase__, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def a_ ( self) -> Any: snake_case_ = self.prepare_config_and_inputs() ((snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_) , (snake_case_)) = config_and_inputs snake_case_ = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if 
is_torch_available() else None ) SCREAMING_SNAKE_CASE_ = ( { "feature-extraction": SqueezeBertModel, "fill-mask": SqueezeBertForMaskedLM, "question-answering": SqueezeBertForQuestionAnswering, "text-classification": SqueezeBertForSequenceClassification, "token-classification": SqueezeBertForTokenClassification, "zero-shot": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = False def a_ ( self) -> Tuple: snake_case_ = SqueezeBertModelTester(self) snake_case_ = ConfigTester(self, config_class=lowerCAmelCase__, dim=37) def a_ ( self) -> Optional[Any]: self.config_tester.run_common_tests() def a_ ( self) -> List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*lowerCAmelCase__) def a_ ( self) -> List[str]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*lowerCAmelCase__) def a_ ( self) -> List[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*lowerCAmelCase__) def a_ ( self) -> Optional[Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*lowerCAmelCase__) def a_ ( self) -> Union[str, Any]: snake_case_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*lowerCAmelCase__) @slow def a_ ( self) -> Optional[Any]: for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: snake_case_ = SqueezeBertModel.from_pretrained(lowerCAmelCase__) self.assertIsNotNone(lowerCAmelCase__) @require_sentencepiece @require_tokenizers @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def a_ ( self) -> str: snake_case_ = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli') snake_case_ = torch.tensor([[1, 2_9414, 232, 328, 740, 1140, 1_2695, 69, 13, 1588, 2]]) snake_case_ = model(lowerCAmelCase__)[0] snake_case_ = torch.Size((1, 3)) self.assertEqual(output.shape, lowerCAmelCase__) snake_case_ = torch.tensor([[0.6401, -0.0349, -0.6041]]) self.assertTrue(torch.allclose(lowerCAmelCase__, lowerCAmelCase__, atol=1e-4))
312
"""simple docstring""" __UpperCamelCase = 256 # Modulus to hash a string __UpperCamelCase = 100_0003 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool: snake_case_ = len(UpperCAmelCase ) snake_case_ = len(UpperCAmelCase ) if p_len > t_len: return False snake_case_ = 0 snake_case_ = 0 snake_case_ = 1 # Calculating the hash of pattern and substring of text for i in range(UpperCAmelCase ): snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue snake_case_ = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash snake_case_ = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: snake_case_ = 'abc1abc12' snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc' snake_case_ = 'alskfjaldsk23adsfabcabc' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 2) snake_case_ = 'ABABX' snake_case_ = 'ABABZABABYABABX' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 3) snake_case_ = 'AAAB' snake_case_ = 'ABAAAAAB' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 4) snake_case_ = 'abcdabcy' snake_case_ = 'abcxabcdabxabcdabcdabcy' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 5) snake_case_ = 'Lü' snake_case_ = 'Lüsai' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'Lue' assert not rabin_karp(UpperCAmelCase , UpperCAmelCase ) print('Success.' ) if __name__ == "__main__": test_rabin_karp()
312
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule __UpperCamelCase = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']} if TYPE_CHECKING: from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "resnet" SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"] def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict: super().__init__(**lowerCAmelCase__) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}') snake_case_ = num_channels snake_case_ = embedding_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = layer_type snake_case_ = hidden_act snake_case_ = downsample_in_first_stage snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-3
312
1
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black __UpperCamelCase = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated. __UpperCamelCase = ''' def __init__(self, config): super().__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states ''' class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Any: snake_case_ = tempfile.mkdtemp() os.makedirs(os.path.join(self.transformer_dir, 'models/bert/')) snake_case_ = self.transformer_dir shutil.copy( os.path.join(lowerCAmelCase__, 'src/transformers/models/bert/modeling_bert.py'), os.path.join(self.transformer_dir, 'models/bert/modeling_bert.py'), ) def a_ ( self) -> Any: snake_case_ = 'src/transformers' shutil.rmtree(self.transformer_dir) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=None) -> List[str]: snake_case_ = comment + f'\nclass {class_name}(nn.Module):\n' + class_code if overwrite_result is not None: snake_case_ = comment + f'\nclass {class_name}(nn.Module):\n' + overwrite_result snake_case_ = black.Mode(target_versions={black.TargetVersion.PYaa}, line_length=119) snake_case_ = black.format_str(lowerCAmelCase__, mode=lowerCAmelCase__) snake_case_ = os.path.join(self.transformer_dir, 'new_code.py') with open(lowerCAmelCase__, 'w', newline='\n') as f: f.write(lowerCAmelCase__) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(lowerCAmelCase__)) == 0) else: check_copies.is_copy_consistent(f.name, overwrite=lowerCAmelCase__) with open(lowerCAmelCase__, 'r') as f: self.assertTrue(f.read(), lowerCAmelCase__) def a_ ( self) -> Optional[int]: snake_case_ = check_copies.find_code_in_transformers('models.bert.modeling_bert.BertLMPredictionHead') self.assertEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> List[str]: # Base copy consistency self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', REFERENCE_CODE + '\n', ) # With no empty line at the end self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead', 'BertLMPredictionHead', lowerCAmelCase__, ) # Copy consistency with rename self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', re.sub('Bert', 'TestModel', lowerCAmelCase__), ) # Copy consistency with a really long name snake_case_ = 'TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f'# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}', f'{long_class_name}LMPredictionHead', 
re.sub('Bert', lowerCAmelCase__, lowerCAmelCase__), ) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel', 'TestModelLMPredictionHead', lowerCAmelCase__, overwrite_result=re.sub('Bert', 'TestModel', lowerCAmelCase__), ) def a_ ( self) -> str: snake_case_ = check_copies.LOCALIZED_READMES['README_zh-hans.md'] snake_case_ = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the' ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for' ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong' ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1.' ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),' ' released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and' ' lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same' ' method has been applied to compress GPT2 into' ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into' ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),' ' Multilingual BERT into' ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German' ' version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**' ' (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders' ' as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang' ' Luong, Quoc V. Le, Christopher D. Manning.' ) snake_case_ = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) snake_case_ = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1.' ' **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文' ' [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and' ' lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same' ' method has been applied to compress GPT2 into' ' [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into' ' [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),' ' Multilingual BERT into' ' [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German' ' version of DistilBERT.\n1. 
**[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自' ' Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather' ' than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,' ' Christopher D. Manning 发布。\n' ) snake_case_ , snake_case_ = check_copies.convert_to_localized_md( lowerCAmelCase__, lowerCAmelCase__, localized_readme['format_model_list']) self.assertFalse(lowerCAmelCase__) self.assertEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ , snake_case_ = check_copies.convert_to_localized_md( lowerCAmelCase__, lowerCAmelCase__, localized_readme['format_model_list']) # Check whether the number of models is equal to README.md after conversion. self.assertTrue(lowerCAmelCase__) snake_case_ = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the' ' Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for' ' Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong' ' Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.' ) snake_case_ = ( '1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and' ' the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) snake_case_ = ( '1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the' ' Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of' ' Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian' ' Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n' ) snake_case_ , snake_case_ = check_copies.convert_to_localized_md( lowerCAmelCase__, lowerCAmelCase__, localized_readme['format_model_list']) # Check if the model link is synchronized. self.assertEqual(lowerCAmelCase__, lowerCAmelCase__)
312
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> int: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>') tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = 'this is a test' snake_case_ = 'this is a test' return input_text, output_text def a_ ( self) -> Optional[int]: snake_case_ = '<pad>' snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], '<pad>') self.assertEqual(vocab_keys[1], '<unk>') self.assertEqual(vocab_keys[-1], '[PAD]') self.assertEqual(len(lowerCAmelCase__), 3_0001) def a_ ( self) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size, 3_0000) def a_ ( self) -> List[str]: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> str: pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> List[Any]: pass def a_ ( self) -> str: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> List[Any]: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Any: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = 'This is a test' snake_case_ = [13, 1, 4398, 25, 21, 1289] snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = DebertaVaTokenizer(lowerCAmelCase__) snake_case_ = tokenizer.encode('sequence builders') snake_case_ = tokenizer.encode('multi-sequence build') snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, ) @slow def a_ ( self) -> Union[str, Any]: # fmt: off snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
312
1
"""simple docstring""" def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # 1. Validate that path exists between current and next vertices if graph[path[curr_ind - 1]][next_ver] == 0: return False # 2. Validate that next vertex is not already in path return not any(vertex == next_ver for vertex in path ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: # Base Case if curr_ind == len(UpperCAmelCase ): # return whether path exists between current and starting vertices return graph[path[curr_ind - 1]][path[0]] == 1 # Recursive Step for next_ver in range(0 , len(UpperCAmelCase ) ): if valid_connection(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): # Insert current vertex into path as next transition snake_case_ = next_ver # Validate created path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , curr_ind + 1 ): return True # Backtrack snake_case_ = -1 return False def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 0 ) -> list[int]: snake_case_ = [-1] * (len(UpperCAmelCase ) + 1) # initialize start and end of path with starting index snake_case_ = snake_case_ = start_index # evaluate and if we find answer return path either return empty array return path if util_hamilton_cycle(UpperCAmelCase , UpperCAmelCase , 1 ) else []
312
"""simple docstring""" # flake8: noqa # Lint as: python3 from typing import Dict, List, Optional, Type from .. import config from ..utils import logging from .formatting import ( ArrowFormatter, CustomFormatter, Formatter, PandasFormatter, PythonFormatter, TensorFormatter, format_table, query_table, ) from .np_formatter import NumpyFormatter __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {} __UpperCamelCase = {} __UpperCamelCase = {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None , ) -> Optional[Any]: snake_case_ = aliases if aliases is not None else [] if format_type in _FORMAT_TYPES: logger.warning( f'Overwriting format type \'{format_type}\' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})' ) snake_case_ = formatter_cls for alias in set(aliases + [format_type] ): if alias in _FORMAT_TYPES_ALIASES: logger.warning( f'Overwriting format type alias \'{alias}\' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})' ) snake_case_ = format_type def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = None ) -> Union[str, Any]: snake_case_ = aliases if aliases is not None else [] for alias in set(aliases + [format_type] ): snake_case_ = unavailable_error # Here we define all the available formatting functions that can be used by `Dataset.set_format` _register_formatter(PythonFormatter, None, aliases=['''python''']) _register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow''']) _register_formatter(NumpyFormatter, '''numpy''', aliases=['''np''']) _register_formatter(PandasFormatter, '''pandas''', aliases=['''pd''']) _register_formatter(CustomFormatter, '''custom''') if config.TORCH_AVAILABLE: from .torch_formatter import TorchFormatter _register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch''']) else: __UpperCamelCase = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''') _register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch''']) if config.TF_AVAILABLE: from .tf_formatter import TFFormatter _register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf''']) else: __UpperCamelCase = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''') _register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf''']) if config.JAX_AVAILABLE: from .jax_formatter import JaxFormatter _register_formatter(JaxFormatter, '''jax''', aliases=[]) else: __UpperCamelCase = ValueError('''JAX needs to be installed to be able to return JAX arrays.''') _register_unavailable_formatter(_jax_error, '''jax''', aliases=[]) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[str]: if format_type in _FORMAT_TYPES_ALIASES: return _FORMAT_TYPES_ALIASES[format_type] else: return format_type def UpperCAmelCase ( UpperCAmelCase , **UpperCAmelCase ) -> Formatter: snake_case_ = get_format_type_from_alias(UpperCAmelCase ) if format_type in _FORMAT_TYPES: return _FORMAT_TYPES[format_type](**UpperCAmelCase ) if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE: raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type] else: raise ValueError( f'Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got \'{format_type}\'' )
312
1
"""simple docstring""" import argparse import json import math import os import time import traceback import zipfile from collections import Counter import requests def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Tuple: snake_case_ = None if token is not None: snake_case_ = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'} snake_case_ = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100' snake_case_ = requests.get(UpperCAmelCase , headers=UpperCAmelCase ).json() snake_case_ = {} try: job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) snake_case_ = math.ceil((result['total_count'] - 100) / 100 ) for i in range(UpperCAmelCase ): snake_case_ = requests.get(url + f'&page={i + 2}' , headers=UpperCAmelCase ).json() job_links.update({job['name']: job['html_url'] for job in result['jobs']} ) return job_links except Exception: print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Optional[Any]: snake_case_ = None if token is not None: snake_case_ = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'} snake_case_ = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100' snake_case_ = requests.get(UpperCAmelCase , headers=UpperCAmelCase ).json() snake_case_ = {} try: artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) snake_case_ = math.ceil((result['total_count'] - 100) / 100 ) for i in range(UpperCAmelCase ): snake_case_ = requests.get(url + f'&page={i + 2}' , headers=UpperCAmelCase ).json() artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']} ) return artifacts except Exception: print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}' ) return {} def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = None if token is not None: snake_case_ = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'} snake_case_ = requests.get(UpperCAmelCase , headers=UpperCAmelCase , allow_redirects=UpperCAmelCase ) snake_case_ = result.headers['Location'] snake_case_ = requests.get(UpperCAmelCase , allow_redirects=UpperCAmelCase ) snake_case_ = os.path.join(UpperCAmelCase , f'{artifact_name}.zip' ) with open(UpperCAmelCase , 'wb' ) as fp: fp.write(response.content ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Dict: snake_case_ = [] snake_case_ = [] snake_case_ = None with zipfile.ZipFile(UpperCAmelCase ) as z: for filename in z.namelist(): if not os.path.isdir(UpperCAmelCase ): # read the file if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]: with z.open(UpperCAmelCase ) as f: for line in f: snake_case_ = line.decode('UTF-8' ).strip() if filename == "failures_line.txt": try: # `error_line` is the place where `error` occurs snake_case_ = line[: line.index(': ' )] snake_case_ = line[line.index(': ' ) + len(': ' ) :] errors.append([error_line, error] ) except Exception: # skip un-related lines pass elif filename == "summary_short.txt" and line.startswith('FAILED ' ): # `test` is the test method that failed snake_case_ = line[len('FAILED ' ) :] failed_tests.append(UpperCAmelCase ) elif filename == "job_name.txt": snake_case_ = line if len(UpperCAmelCase ) != len(UpperCAmelCase ): 
raise ValueError( f'`errors` and `failed_tests` should have the same number of elements. Got {len(UpperCAmelCase )} for `errors` ' f'and {len(UpperCAmelCase )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some' ' problem.' ) snake_case_ = None if job_name and job_links: snake_case_ = job_links.get(UpperCAmelCase , UpperCAmelCase ) # A list with elements of the form (line of error, error, failed test) snake_case_ = [x + [y] + [job_link] for x, y in zip(UpperCAmelCase , UpperCAmelCase )] return result def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Any: snake_case_ = [] snake_case_ = [os.path.join(UpperCAmelCase , UpperCAmelCase ) for p in os.listdir(UpperCAmelCase ) if p.endswith('.zip' )] for p in paths: errors.extend(get_errors_from_single_artifact(UpperCAmelCase , job_links=UpperCAmelCase ) ) return errors def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> Dict: snake_case_ = Counter() counter.update([x[1] for x in logs] ) snake_case_ = counter.most_common() snake_case_ = {} for error, count in counts: if error_filter is None or error not in error_filter: snake_case_ = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]} snake_case_ = dict(sorted(r.items() , key=lambda UpperCAmelCase : item[1]["count"] , reverse=UpperCAmelCase ) ) return r def UpperCAmelCase ( UpperCAmelCase ) -> str: snake_case_ = test.split('::' )[0] if test.startswith('tests/models/' ): snake_case_ = test.split('/' )[2] else: snake_case_ = None return test def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> int: snake_case_ = [(x[0], x[1], get_model(x[2] )) for x in logs] snake_case_ = [x for x in logs if x[2] is not None] snake_case_ = {x[2] for x in logs} snake_case_ = {} for test in tests: snake_case_ = Counter() # count by errors in `test` counter.update([x[1] for x in logs if x[2] == test] ) snake_case_ = counter.most_common() snake_case_ = {error: count for error, count in counts if (error_filter is None or error not in error_filter)} snake_case_ = sum(error_counts.values() ) if n_errors > 0: snake_case_ = {'count': n_errors, 'errors': error_counts} snake_case_ = dict(sorted(r.items() , key=lambda UpperCAmelCase : item[1]["count"] , reverse=UpperCAmelCase ) ) return r def UpperCAmelCase ( UpperCAmelCase ) -> Any: snake_case_ = '| no. | error | status |' snake_case_ = '|-:|:-|:-|' snake_case_ = [header, sep] for error in reduced_by_error: snake_case_ = reduced_by_error[error]['count'] snake_case_ = f'| {count} | {error[:100]} | |' lines.append(UpperCAmelCase ) return "\n".join(UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> str: snake_case_ = '| model | no. 
of errors | major error | count |' snake_case_ = '|-:|-:|-:|-:|' snake_case_ = [header, sep] for model in reduced_by_model: snake_case_ = reduced_by_model[model]['count'] snake_case_ , snake_case_ = list(reduced_by_model[model]['errors'].items() )[0] snake_case_ = f'| {model} | {count} | {error[:60]} | {_count} |' lines.append(UpperCAmelCase ) return "\n".join(UpperCAmelCase ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''') parser.add_argument( '''--output_dir''', type=str, required=True, help='''Where to store the downloaded artifacts and other result files.''', ) parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''') __UpperCamelCase = parser.parse_args() os.makedirs(args.output_dir, exist_ok=True) __UpperCamelCase = get_job_links(args.workflow_run_id, token=args.token) __UpperCamelCase = {} # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee. # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`. if _job_links: for k, v in _job_links.items(): # This is how GitHub actions combine job names. if " / " in k: __UpperCamelCase = k.find(''' / ''') __UpperCamelCase = k[index + len(''' / ''') :] __UpperCamelCase = v with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(job_links, fp, ensure_ascii=False, indent=4) __UpperCamelCase = get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) for idx, (name, url) in enumerate(artifacts.items()): download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) __UpperCamelCase = get_all_errors(args.output_dir, job_links=job_links) # `e[1]` is the error __UpperCamelCase = Counter() counter.update([e[1] for e in errors]) # print the top 30 most common test errors __UpperCamelCase = counter.most_common(30) for item in most_common: print(item) with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp: json.dump(errors, fp, ensure_ascii=False, indent=4) __UpperCamelCase = reduce_by_error(errors) __UpperCamelCase = reduce_by_model(errors) __UpperCamelCase = make_github_table(reduced_by_error) __UpperCamelCase = make_github_table_per_model(reduced_by_model) with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa) with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp: fp.write(sa)
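# A small, self-contained illustration of the reduce-by-error aggregation this script
# performs (the Counter + most_common pass), run on hand-made (error line, error,
# failed test) triples; all values below are mine, chosen so they can be checked by eye.
from collections import Counter

logs = [
    ["tests/a.py:10", "AssertionError", "test_a"],
    ["tests/b.py:20", "AssertionError", "test_b"],
    ["tests/c.py:30", "TimeoutError", "test_c"],
]

counter = Counter(entry[1] for entry in logs)
reduced = {
    error: {"count": count, "failed_tests": [e[2] for e in logs if e[1] == error]}
    for error, count in counter.most_common()
}
assert reduced["AssertionError"]["count"] == 2
assert reduced["TimeoutError"]["failed_tests"] == ["test_c"]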
312
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mvp import MvpTokenizer __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''} # See all MVP models at https://huggingface.co/models?filter=mvp __UpperCamelCase = { '''vocab_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/vocab.json''', }, '''added_tokens.json''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/added_tokens.json''', }, '''merges_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/merges.txt''', }, '''tokenizer_file''': { '''RUCAIBox/mvp''': '''https://huggingface.co/RUCAIBox/mvp/resolve/main/tokenizer.json''', }, } __UpperCamelCase = { '''RUCAIBox/mvp''': 1024, } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = MvpTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="replace", lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", lowerCAmelCase__=False, lowerCAmelCase__=True, **lowerCAmelCase__, ) -> Union[str, Any]: super().__init__( lowerCAmelCase__, lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, errors=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, add_prefix_space=lowerCAmelCase__, trim_offsets=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__()) if pre_tok_state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = getattr(lowerCAmelCase__, pre_tok_state.pop('type')) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**lowerCAmelCase__) snake_case_ = add_prefix_space # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__` snake_case_ = 'post_processor' snake_case_ = getattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) if tokenizer_component_instance: snake_case_ = json.loads(tokenizer_component_instance.__getstate__()) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: snake_case_ = tuple(state['sep']) if "cls" in state: snake_case_ = tuple(state['cls']) snake_case_ = False if state.get('add_prefix_space', lowerCAmelCase__) != add_prefix_space: snake_case_ = add_prefix_space snake_case_ = True if state.get('trim_offsets', lowerCAmelCase__) != trim_offsets: snake_case_ = trim_offsets snake_case_ = True if changes_to_apply: snake_case_ = getattr(lowerCAmelCase__, state.pop('type')) snake_case_ = component_class(**lowerCAmelCase__) setattr(self.backend_tokenizer, lowerCAmelCase__, lowerCAmelCase__) @property def a_ ( self) -> str: if self._mask_token is None: if self.verbose: logger.error('Using 
mask_token, but it is not set yet.') return None return str(self._mask_token) @mask_token.setter def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else value snake_case_ = value def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._batch_encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, *lowerCAmelCase__, **lowerCAmelCase__) -> BatchEncoding: snake_case_ = kwargs.get('is_split_into_words', lowerCAmelCase__) if is_split_into_words and not self.add_prefix_space: raise ValueError( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' 'to use it with pretokenized inputs.') return super()._encode_plus(*lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: snake_case_ = self._tokenizer.model.save(lowerCAmelCase__, name=lowerCAmelCase__) return tuple(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None) -> str: snake_case_ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0]
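# A hedged usage sketch for the fast tokenizer defined above; it assumes network access
# to the RUCAIBox/mvp checkpoint and uses the upstream class name MvpTokenizerFast for
# the class this file defines.
from transformers import MvpTokenizerFast

tok = MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
enc = tok("Summarize: the cat sat on the mat.")
print(enc["input_ids"][:5])
print(tok.decode(enc["input_ids"]))  # round-trips with <s> ... </s> special tokens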
312
1
"""simple docstring""" import copy import re class UpperCamelCase : SCREAMING_SNAKE_CASE_ = "hp" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = None @classmethod def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = prefix snake_case_ = defaults cls.build_naming_info() @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: if len(lowerCAmelCase__) == 0: return "" snake_case_ = None if any(char.isdigit() for char in word): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1, len(lowerCAmelCase__) + 1): snake_case_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: snake_case_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCAmelCase__): snake_case_ = '' while integer != 0: snake_case_ = chr(ord('A') + integer % 10) + s integer //= 10 return s snake_case_ = 0 while True: snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__) if sword in info["reverse_short_word"]: continue else: snake_case_ = sword break snake_case_ = short_word snake_case_ = word return short_word @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = param_name.split('_') snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name snake_case_ = ['', '_'] for separator in separators: snake_case_ = separator.join(lowerCAmelCase__) if shortname not in info["reverse_short_param"]: snake_case_ = shortname snake_case_ = param_name return shortname return param_name @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = short_name snake_case_ = param_name @classmethod def a_ ( cls) -> List[str]: if cls.NAMING_INFO is not None: return snake_case_ = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } snake_case_ = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = info @classmethod def a_ ( cls, lowerCAmelCase__) -> List[Any]: cls.build_naming_info() assert cls.PREFIX is not None snake_case_ = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue snake_case_ = cls.NAMING_INFO['short_param'][k] if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = 1 if v else 0 snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-' snake_case_ = f'{key}{sep}{v}' name.append(lowerCAmelCase__) return "_".join(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__) -> Optional[Any]: snake_case_ = repr[len(cls.PREFIX) + 1 :] if repr == "": snake_case_ = [] else: snake_case_ = repr.split('_') snake_case_ = {} for value in values: if "-" in value: snake_case_ , snake_case_ = value.split('-') else: snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__) snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__)) snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k] snake_case_ = p_v for k in cls.DEFAULTS: if k not in parameters: snake_case_ = cls.DEFAULTS[k] return parameters
312
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline" def a_ ( self, lowerCAmelCase__=0) -> List[Any]: snake_case_ = floats_tensor((1, 3, 128, 128), rng=random.Random(lowerCAmelCase__)) snake_case_ = np.random.RandomState(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'strength': 0.75, 'guidance_scale': 7.5, 'output_type': 'numpy', } return inputs def a_ ( self) -> Optional[Any]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087]) assert np.abs(image_slice - expected_slice).max() < 1e-1 def a_ ( self) -> List[str]: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> str: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) # warmup pass to apply optimizations snake_case_ = pipe(**self.get_dummy_inputs()) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> int: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def 
a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 def a_ ( self) -> Dict: snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider='CPUExecutionProvider') snake_case_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs() snake_case_ = pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) snake_case_ = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): @property def a_ ( self) -> int: return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def a_ ( self) -> str: snake_case_ = ort.SessionOptions() snake_case_ = False return options def a_ ( self) -> Any: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) # using the PNDM scheduler by default snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'CompVis/stable-diffusion-v1-4', revision='onnx', safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=10, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2 def a_ ( self) -> List[Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/img2img/sketch-mountains-input.jpg') snake_case_ = init_image.resize((768, 512)) snake_case_ = LMSDiscreteScheduler.from_pretrained( 'runwayml/stable-diffusion-v1-5', subfolder='scheduler', revision='onnx') snake_case_ = OnnxStableDiffusionImgaImgPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5', revision='onnx', scheduler=lowerCAmelCase__, safety_checker=lowerCAmelCase__, feature_extractor=lowerCAmelCase__, provider=self.gpu_provider, sess_options=self.gpu_options, ) pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = 'A fantasy landscape, trending on artstation' snake_case_ = np.random.RandomState(0) 
snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, strength=0.75, guidance_scale=7.5, num_inference_steps=20, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images snake_case_ = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) snake_case_ = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431]) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
312
1
"""simple docstring""" __UpperCamelCase = '''0.18.2''' from .configuration_utils import ConfigMixin from .utils import ( OptionalDependencyNotAvailable, is_flax_available, is_inflect_available, is_invisible_watermark_available, is_k_diffusion_available, is_k_diffusion_version, is_librosa_available, is_note_seq_available, is_onnx_available, is_scipy_available, is_torch_available, is_torchsde_available, is_transformers_available, is_transformers_version, is_unidecode_available, logging, ) try: if not is_onnx_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_onnx_objects import * # noqa F403 else: from .pipelines import OnnxRuntimeModel try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_pt_objects import * # noqa F403 else: from .models import ( AutoencoderKL, ControlNetModel, ModelMixin, PriorTransformer, TaFilmDecoder, TransformeraDModel, UNetaDModel, UNetaDConditionModel, UNetaDModel, UNetaDConditionModel, VQModel, ) from .optimization import ( get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) from .pipelines import ( AudioPipelineOutput, ConsistencyModelPipeline, DanceDiffusionPipeline, DDIMPipeline, DDPMPipeline, DiffusionPipeline, DiTPipeline, ImagePipelineOutput, KarrasVePipeline, LDMPipeline, LDMSuperResolutionPipeline, PNDMPipeline, RePaintPipeline, ScoreSdeVePipeline, ) from .schedulers import ( CMStochasticIterativeScheduler, DDIMInverseScheduler, DDIMParallelScheduler, DDIMScheduler, DDPMParallelScheduler, DDPMScheduler, DEISMultistepScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, DPMSolverSinglestepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, HeunDiscreteScheduler, IPNDMScheduler, KarrasVeScheduler, KDPMaAncestralDiscreteScheduler, KDPMaDiscreteScheduler, PNDMScheduler, RePaintScheduler, SchedulerMixin, ScoreSdeVeScheduler, UnCLIPScheduler, UniPCMultistepScheduler, VQDiffusionScheduler, ) from .training_utils import EMAModel try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .schedulers import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .schedulers import DPMSolverSDEScheduler try: if not (is_torch_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_objects import * # noqa F403 else: from .pipelines import ( AltDiffusionImgaImgPipeline, AltDiffusionPipeline, AudioLDMPipeline, CycleDiffusionPipeline, IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ImageTextPipelineOutput, KandinskyImgaImgPipeline, KandinskyInpaintPipeline, KandinskyPipeline, KandinskyPriorPipeline, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaControlnetPipeline, KandinskyVaaImgaImgPipeline, KandinskyVaaInpaintPipeline, KandinskyVaaPipeline, KandinskyVaaPriorEmbaEmbPipeline, 
KandinskyVaaPriorPipeline, LDMTextToImagePipeline, PaintByExamplePipeline, SemanticStableDiffusionPipeline, ShapEImgaImgPipeline, ShapEPipeline, StableDiffusionAttendAndExcitePipeline, StableDiffusionControlNetImgaImgPipeline, StableDiffusionControlNetInpaintPipeline, StableDiffusionControlNetPipeline, StableDiffusionDepthaImgPipeline, StableDiffusionDiffEditPipeline, StableDiffusionImageVariationPipeline, StableDiffusionImgaImgPipeline, StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy, StableDiffusionInstructPixaPixPipeline, StableDiffusionLatentUpscalePipeline, StableDiffusionLDMaDPipeline, StableDiffusionModelEditingPipeline, StableDiffusionPanoramaPipeline, StableDiffusionParadigmsPipeline, StableDiffusionPipeline, StableDiffusionPipelineSafe, StableDiffusionPixaPixZeroPipeline, StableDiffusionSAGPipeline, StableDiffusionUpscalePipeline, StableUnCLIPImgaImgPipeline, StableUnCLIPPipeline, TextToVideoSDPipeline, TextToVideoZeroPipeline, UnCLIPImageVariationPipeline, UnCLIPPipeline, UniDiffuserModel, UniDiffuserPipeline, UniDiffuserTextDecoder, VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, VideoToVideoSDPipeline, VQDiffusionPipeline, ) try: if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403 else: from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline try: if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403 else: from .pipelines import StableDiffusionKDiffusionPipeline try: if not (is_torch_available() and is_transformers_available() and is_onnx_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403 else: from .pipelines import ( OnnxStableDiffusionImgaImgPipeline, OnnxStableDiffusionInpaintPipeline, OnnxStableDiffusionInpaintPipelineLegacy, OnnxStableDiffusionPipeline, OnnxStableDiffusionUpscalePipeline, StableDiffusionOnnxPipeline, ) try: if not (is_torch_available() and is_librosa_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_torch_and_librosa_objects import * # noqa F403 else: from .pipelines import AudioDiffusionPipeline, Mel try: if not (is_transformers_available() and is_torch_available() and is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403 else: from .pipelines import SpectrogramDiffusionPipeline try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_objects import * # noqa F403 else: from .models.controlnet_flax import FlaxControlNetModel from .models.modeling_flax_utils import FlaxModelMixin from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel from .models.vae_flax import FlaxAutoencoderKL from .pipelines import FlaxDiffusionPipeline from .schedulers import ( FlaxDDIMScheduler, FlaxDDPMScheduler, 
FlaxDPMSolverMultistepScheduler, FlaxKarrasVeScheduler, FlaxLMSDiscreteScheduler, FlaxPNDMScheduler, FlaxSchedulerMixin, FlaxScoreSdeVeScheduler, ) try: if not (is_flax_available() and is_transformers_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_flax_and_transformers_objects import * # noqa F403 else: from .pipelines import ( FlaxStableDiffusionControlNetPipeline, FlaxStableDiffusionImgaImgPipeline, FlaxStableDiffusionInpaintPipeline, FlaxStableDiffusionPipeline, ) try: if not (is_note_seq_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from .utils.dummy_note_seq_objects import * # noqa F403 else: from .pipelines import MidiProcessor
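# For reference, a stripped-down, self-contained sketch of the optional-dependency
# guard this __init__ repeats for every backend. The probe and stub below are
# placeholders of mine, not real diffusers APIs.
import importlib.util


def _backend_available(name: str) -> bool:
    # Stands in for helpers like is_torch_available() used above.
    return importlib.util.find_spec(name) is not None


class _MissingBackendStub:
    # Mirrors the dummy objects imported on failure: importable, but loud on first use.
    def __init__(self, *args, **kwargs):
        raise ImportError("This object requires an optional backend that is not installed.")


if _backend_available("torch"):
    from torch import nn as ExportedSymbol  # the real import path
else:
    ExportedSymbol = _MissingBackendStub  # the dummy fallback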
312
"""simple docstring""" import io import math from typing import Dict, Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import convert_to_rgb, normalize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_vision_available, logging from ...utils.import_utils import requires_backends if is_vision_available(): import textwrap from PIL import Image, ImageDraw, ImageFont if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: __UpperCamelCase = False __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = '''ybelkada/fonts''' def UpperCAmelCase ( ) -> Dict: if is_torch_available() and not is_torch_greater_or_equal_than_1_11: raise ImportError( f'You are using torch=={torch.__version__}, but torch>=1.11.0 is required to use ' 'Pix2StructImageProcessor. Please upgrade torch.' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: requires_backends(UpperCAmelCase , ['torch'] ) _check_torch_version() snake_case_ = image_tensor.unsqueeze(0 ) snake_case_ = torch.nn.functional.unfold(UpperCAmelCase , (patch_height, patch_width) , stride=(patch_height, patch_width) ) snake_case_ = patches.reshape(image_tensor.size(0 ) , image_tensor.size(1 ) , UpperCAmelCase , UpperCAmelCase , -1 ) snake_case_ = patches.permute(0 , 4 , 2 , 3 , 1 ).reshape( image_tensor.size(2 ) // patch_height , image_tensor.size(3 ) // patch_width , image_tensor.size(1 ) * patch_height * patch_width , ) return patches.unsqueeze(0 ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase = 36 , UpperCAmelCase = "black" , UpperCAmelCase = "white" , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = 5 , UpperCAmelCase = None , UpperCAmelCase = None , ) -> Image.Image: requires_backends(UpperCAmelCase , 'vision' ) # Add new lines so that each line is no more than 80 characters. snake_case_ = textwrap.TextWrapper(width=80 ) snake_case_ = wrapper.wrap(text=UpperCAmelCase ) snake_case_ = '\n'.join(UpperCAmelCase ) if font_bytes is not None and font_path is None: snake_case_ = io.BytesIO(UpperCAmelCase ) elif font_path is not None: snake_case_ = font_path else: snake_case_ = hf_hub_download(UpperCAmelCase , 'Arial.TTF' ) snake_case_ = ImageFont.truetype(UpperCAmelCase , encoding='UTF-8' , size=UpperCAmelCase ) # Use a temporary canvas to determine the width and height in pixels when # rendering the text. snake_case_ = ImageDraw.Draw(Image.new('RGB' , (1, 1) , UpperCAmelCase ) ) snake_case_ , snake_case_ , snake_case_ , snake_case_ = temp_draw.textbbox((0, 0) , UpperCAmelCase , UpperCAmelCase ) # Create the actual image with a bit of padding around the text. 
snake_case_ = text_width + left_padding + right_padding snake_case_ = text_height + top_padding + bottom_padding snake_case_ = Image.new('RGB' , (image_width, image_height) , UpperCAmelCase ) snake_case_ = ImageDraw.Draw(UpperCAmelCase ) draw.text(xy=(left_padding, top_padding) , text=UpperCAmelCase , fill=UpperCAmelCase , font=UpperCAmelCase ) return image def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , **UpperCAmelCase ) -> Any: requires_backends(UpperCAmelCase , 'vision' ) # Convert to PIL image if necessary snake_case_ = to_pil_image(UpperCAmelCase ) snake_case_ = render_text(UpperCAmelCase , **UpperCAmelCase ) snake_case_ = max(header_image.width , image.width ) snake_case_ = int(image.height * (new_width / image.width) ) snake_case_ = int(header_image.height * (new_width / header_image.width) ) snake_case_ = Image.new('RGB' , (new_width, new_height + new_header_height) , 'white' ) new_image.paste(header_image.resize((new_width, new_header_height) ) , (0, 0) ) new_image.paste(image.resize((new_width, new_height) ) , (0, new_header_height) ) # Convert back to the original framework if necessary snake_case_ = to_numpy_array(UpperCAmelCase ) if infer_channel_dimension_format(UpperCAmelCase ) == ChannelDimension.LAST: snake_case_ = to_channel_dimension_format(UpperCAmelCase , ChannelDimension.LAST ) return new_image class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["flattened_patches"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = 2048, lowerCAmelCase__ = False, **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = patch_size if patch_size is not None else {'height': 16, 'width': 16} snake_case_ = do_normalize snake_case_ = do_convert_rgb snake_case_ = max_patches snake_case_ = is_vqa def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, **lowerCAmelCase__) -> np.ndarray: requires_backends(self.extract_flattened_patches, 'torch') _check_torch_version() # convert to torch snake_case_ = to_channel_dimension_format(lowerCAmelCase__, ChannelDimension.FIRST) snake_case_ = torch.from_numpy(lowerCAmelCase__) snake_case_ , snake_case_ = patch_size['height'], patch_size['width'] snake_case_ , snake_case_ = get_image_size(lowerCAmelCase__) # maximize scale s.t. 
snake_case_ = math.sqrt(max_patches * (patch_height / image_height) * (patch_width / image_width)) snake_case_ = max(min(math.floor(scale * image_height / patch_height), lowerCAmelCase__), 1) snake_case_ = max(min(math.floor(scale * image_width / patch_width), lowerCAmelCase__), 1) snake_case_ = max(num_feasible_rows * patch_height, 1) snake_case_ = max(num_feasible_cols * patch_width, 1) snake_case_ = torch.nn.functional.interpolate( image.unsqueeze(0), size=(resized_height, resized_width), mode='bilinear', align_corners=lowerCAmelCase__, antialias=lowerCAmelCase__, ).squeeze(0) # [1, rows, columns, patch_height * patch_width * image_channels] snake_case_ = torch_extract_patches(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = patches.shape snake_case_ = patches_shape[1] snake_case_ = patches_shape[2] snake_case_ = patches_shape[3] # [rows * columns, patch_height * patch_width * image_channels] snake_case_ = patches.reshape([rows * columns, depth]) # [rows * columns, 1] snake_case_ = torch.arange(lowerCAmelCase__).reshape([rows, 1]).repeat(1, lowerCAmelCase__).reshape([rows * columns, 1]) snake_case_ = torch.arange(lowerCAmelCase__).reshape([1, columns]).repeat(lowerCAmelCase__, 1).reshape([rows * columns, 1]) # Offset by 1 so the ids do not contain zeros, which represent padding. row_ids += 1 col_ids += 1 # Prepare additional patch features. # [rows * columns, 1] snake_case_ = row_ids.to(torch.floataa) snake_case_ = col_ids.to(torch.floataa) # [rows * columns, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.cat([row_ids, col_ids, patches], -1) # [max_patches, 2 + patch_height * patch_width * image_channels] snake_case_ = torch.nn.functional.pad(lowerCAmelCase__, [0, 0, 0, max_patches - (rows * columns)]).float() snake_case_ = to_numpy_array(lowerCAmelCase__) return result def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__) -> np.ndarray: if image.dtype == np.uinta: snake_case_ = image.astype(np.floataa) # take mean across the whole `image` snake_case_ = np.mean(lowerCAmelCase__) snake_case_ = np.std(lowerCAmelCase__) snake_case_ = max(lowerCAmelCase__, 1.0 / math.sqrt(np.prod(image.shape))) return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> ImageInput: snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb snake_case_ = patch_size if patch_size is not None else self.patch_size snake_case_ = max_patches if max_patches is not None else self.max_patches snake_case_ = self.is_vqa if kwargs.get('data_format', lowerCAmelCase__) is not None: raise ValueError('data_format is not an accepted input as the outputs are ') snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') # PIL RGBA images are converted to RGB if do_convert_rgb: snake_case_ = [convert_to_rgb(lowerCAmelCase__) for image in images] # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] if is_vqa: if header_text is None: raise ValueError('A header text must be provided for VQA models.') snake_case_ = kwargs.pop('font_bytes', lowerCAmelCase__) snake_case_ = kwargs.pop('font_path', lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = [header_text] * len(lowerCAmelCase__) snake_case_ = [ render_header(lowerCAmelCase__, header_text[i], font_bytes=lowerCAmelCase__, font_path=lowerCAmelCase__) for i, image in enumerate(lowerCAmelCase__) ] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__) for image in images] # convert to torch tensor and permute snake_case_ = [ self.extract_flattened_patches(image=lowerCAmelCase__, max_patches=lowerCAmelCase__, patch_size=lowerCAmelCase__) for image in images ] # create attention mask in numpy snake_case_ = [(image.sum(axis=-1) != 0).astype(np.floataa) for image in images] snake_case_ = BatchFeature( data={'flattened_patches': images, 'attention_mask': attention_masks}, tensor_type=lowerCAmelCase__) return encoded_outputs
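# A hedged end-to-end sketch for the processor above; it assumes `transformers`, `torch`,
# and numpy are installed, and uses the upstream class name Pix2StructImageProcessor for
# the class this file defines. The shapes follow the extract_flattened_patches logic:
# each patch row is [row_id, col_id, 16*16*3 pixel values].
import numpy as np
from transformers import Pix2StructImageProcessor

processor = Pix2StructImageProcessor(max_patches=1024)
image = np.random.randint(0, 256, (400, 600, 3), dtype=np.uint8)  # any RGB array works

batch = processor(images=image, return_tensors="pt")
print(batch["flattened_patches"].shape)  # (1, 1024, 770) with the default 16x16 patches
print(batch["attention_mask"].shape)     # (1, 1024); zeros mark padded patch slots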
312
1
"""simple docstring""" import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/test_sentencepiece.model''') if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right __UpperCamelCase = 25_0004 __UpperCamelCase = 25_0020 @require_sentencepiece @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = MBartTokenizer SCREAMING_SNAKE_CASE_ = MBartTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> Optional[Any]: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = MBartTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__) tokenizer.save_pretrained(self.tmpdirname) def a_ ( self) -> Dict: snake_case_ = MBartTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = tokenizer.tokenize('This is a test') self.assertListEqual(lowerCAmelCase__, ['▁This', '▁is', '▁a', '▁t', 'est']) self.assertListEqual( tokenizer.convert_tokens_to_ids(lowerCAmelCase__), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) snake_case_ = tokenizer.tokenize('I was born in 92000, and this is falsé.') self.assertListEqual( lowerCAmelCase__, [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '9', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', 'é', '.', ], ) snake_case_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ], ) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual( lowerCAmelCase__, [ SPIECE_UNDERLINE + 'I', SPIECE_UNDERLINE + 'was', SPIECE_UNDERLINE + 'b', 'or', 'n', SPIECE_UNDERLINE + 'in', SPIECE_UNDERLINE + '', '<unk>', '2', '0', '0', '0', ',', SPIECE_UNDERLINE + 'and', SPIECE_UNDERLINE + 'this', SPIECE_UNDERLINE + 'is', SPIECE_UNDERLINE + 'f', 'al', 's', '<unk>', '.', ], ) def a_ ( self) -> Any: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return snake_case_ = (self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-mbart', {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})'): snake_case_ = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self.tokenizer_class.from_pretrained(lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__) snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files)) snake_case_ = tuple(f for f in tokenizer_r_files if 'tokenizer.json' not in f) self.assertSequenceEqual(lowerCAmelCase__, lowerCAmelCase__) # Checks 
everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__) snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__)) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(lowerCAmelCase__) # Save tokenizer rust, legacy_format=True snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__, legacy_format=lowerCAmelCase__) snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__) # Checks it save with the same files self.assertSequenceEqual(lowerCAmelCase__, lowerCAmelCase__) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__) snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__)) shutil.rmtree(lowerCAmelCase__) # Save tokenizer rust, legacy_format=False snake_case_ = tempfile.mkdtemp() snake_case_ = tokenizer_r.save_pretrained(lowerCAmelCase__, legacy_format=lowerCAmelCase__) snake_case_ = tokenizer_p.save_pretrained(lowerCAmelCase__) # Checks it saved the tokenizer.json file self.assertTrue(any('tokenizer.json' in f for f in tokenizer_r_files)) # Checks everything loads correctly in the same way snake_case_ = tokenizer_r.from_pretrained(lowerCAmelCase__) snake_case_ = tokenizer_p.from_pretrained(lowerCAmelCase__) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(lowerCAmelCase__, lowerCAmelCase__)) shutil.rmtree(lowerCAmelCase__) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase ( unittest.TestCase ): SCREAMING_SNAKE_CASE_ = "facebook/mbart-large-en-ro" SCREAMING_SNAKE_CASE_ = [ " UN Chief Says There Is No Military Solution in Syria", " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.", ] SCREAMING_SNAKE_CASE_ = [ "Şeful ONU declară că nu există o soluţie militară în Siria", "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei" " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor" " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.", ] SCREAMING_SNAKE_CASE_ = [8_2_7_4, 1_2_7_8_7_3, 2_5_9_1_6, 7, 8_6_2_2, 2_0_7_1, 4_3_8, 6_7_4_8_5, 5_3, 1_8_7_8_9_5, 2_3, 5_1_7_1_2, 2, EN_CODE] @classmethod def a_ ( cls) -> Tuple: snake_case_ = MBartTokenizer.from_pretrained( cls.checkpoint_name, src_lang='en_XX', tgt_lang='ro_RO') snake_case_ = 1 return cls def a_ ( self) -> str: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ar_AR'], 25_0001) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['en_EN'], 25_0004) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids['ro_RO'], 25_0020) def a_ ( self) -> Any: snake_case_ = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0] self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__) def a_ ( 
self) -> Any: self.assertIn(lowerCAmelCase__, self.tokenizer.all_special_ids) snake_case_ = [RO_CODE, 884, 9019, 96, 9, 916, 8_6792, 36, 1_8743, 1_5596, 5, 2] snake_case_ = self.tokenizer.decode(lowerCAmelCase__, skip_special_tokens=lowerCAmelCase__) snake_case_ = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=lowerCAmelCase__) self.assertEqual(lowerCAmelCase__, lowerCAmelCase__) self.assertNotIn(self.tokenizer.eos_token, lowerCAmelCase__) def a_ ( self) -> str: snake_case_ = ['this is gunna be a long sentence ' * 20] assert isinstance(src_text[0], lowerCAmelCase__) snake_case_ = 10 snake_case_ = self.tokenizer(lowerCAmelCase__, max_length=lowerCAmelCase__, truncation=lowerCAmelCase__).input_ids[0] self.assertEqual(ids[-2], 2) self.assertEqual(ids[-1], lowerCAmelCase__) self.assertEqual(len(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> str: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(['<mask>', 'ar_AR']), [25_0026, 25_0001]) def a_ ( self) -> List[Any]: snake_case_ = tempfile.mkdtemp() snake_case_ = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(lowerCAmelCase__) snake_case_ = MBartTokenizer.from_pretrained(lowerCAmelCase__) self.assertDictEqual(new_tok.fairseq_tokens_to_ids, lowerCAmelCase__) @require_torch def a_ ( self) -> int: snake_case_ = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=lowerCAmelCase__, return_tensors='pt') snake_case_ = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def a_ ( self) -> str: snake_case_ = self.tokenizer( self.src_text, text_target=self.tgt_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=len(self.expected_src_tokens), return_tensors='pt', ) snake_case_ = shift_tokens_right(batch['labels'], self.tokenizer.pad_token_id) self.assertIsInstance(lowerCAmelCase__, lowerCAmelCase__) self.assertEqual((2, 14), batch.input_ids.shape) self.assertEqual((2, 14), batch.attention_mask.shape) snake_case_ = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens, lowerCAmelCase__) self.assertEqual(2, batch.decoder_input_ids[0, -1]) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens, []) self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE]) def a_ ( self) -> Optional[Any]: snake_case_ = self.tokenizer(self.src_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=3, return_tensors='pt') snake_case_ = self.tokenizer( text_target=self.tgt_text, padding=lowerCAmelCase__, truncation=lowerCAmelCase__, max_length=10, return_tensors='pt') snake_case_ = targets['input_ids'] snake_case_ = shift_tokens_right(lowerCAmelCase__, self.tokenizer.pad_token_id) self.assertEqual(batch.input_ids.shape[1], 3) self.assertEqual(batch.decoder_input_ids.shape[1], 10) @require_torch def a_ ( self) -> Any: snake_case_ = self.tokenizer._build_translation_inputs( 'A test', return_tensors='pt', src_lang='en_XX', tgt_lang='ar_AR') self.assertEqual( nested_simplify(lowerCAmelCase__), { # A, test, EOS, en_XX 'input_ids': [[62, 3034, 2, 25_0004]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 25_0001, }, )
312
"""simple docstring""" from math import pi def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> float: return 2 * pi * radius * (angle / 360) if __name__ == "__main__": print(arc_length(90, 10))
312
1
"""simple docstring""" from __future__ import annotations __UpperCamelCase = { '''A''': ['''B''', '''C''', '''E'''], '''B''': ['''A''', '''D''', '''E'''], '''C''': ['''A''', '''F''', '''G'''], '''D''': ['''B'''], '''E''': ['''A''', '''B''', '''D'''], '''F''': ['''C'''], '''G''': ['''C'''], } class UpperCamelCase : def __init__( self, lowerCAmelCase__, lowerCAmelCase__) -> None: snake_case_ = graph # mapping node to its parent in resulting breadth first tree snake_case_ = {} snake_case_ = source_vertex def a_ ( self) -> None: snake_case_ = {self.source_vertex} snake_case_ = None snake_case_ = [self.source_vertex] # first in first out queue while queue: snake_case_ = queue.pop(0) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(lowerCAmelCase__) snake_case_ = vertex queue.append(lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> str: if target_vertex == self.source_vertex: return self.source_vertex snake_case_ = self.parent.get(lowerCAmelCase__) if target_vertex_parent is None: snake_case_ = ( f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}' ) raise ValueError(lowerCAmelCase__) return self.shortest_path(lowerCAmelCase__) + f'->{target_vertex}' if __name__ == "__main__": __UpperCamelCase = Graph(graph, '''G''') g.breath_first_search() print(g.shortest_path('''D''')) print(g.shortest_path('''G''')) print(g.shortest_path('''Foo'''))
312
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "trajectory_transformer" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]: snake_case_ = vocab_size snake_case_ = action_weight snake_case_ = reward_weight snake_case_ = value_weight snake_case_ = max_position_embeddings snake_case_ = block_size snake_case_ = action_dim snake_case_ = observation_dim snake_case_ = transition_dim snake_case_ = learning_rate snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_embd snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = resid_pdrop snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = kaiming_initializer_range snake_case_ = use_cache super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
312
1
"""simple docstring""" from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "pixel_values" SCREAMING_SNAKE_CASE_ = False SCREAMING_SNAKE_CASE_ = TimmBackboneConfig def __init__( self, lowerCAmelCase__, **lowerCAmelCase__) -> Tuple: requires_backends(self, 'timm') super().__init__(lowerCAmelCase__) snake_case_ = config if config.backbone is None: raise ValueError('backbone is not set in the config. Please set it to a timm model name.') if config.backbone not in timm.list_models(): raise ValueError(f'backbone {config.backbone} is not supported by timm.') if hasattr(lowerCAmelCase__, 'out_features') and config.out_features is not None: raise ValueError('out_features is not supported by TimmBackbone. Please use out_indices instead.') snake_case_ = getattr(lowerCAmelCase__, 'use_pretrained_backbone', lowerCAmelCase__) if pretrained is None: raise ValueError('use_pretrained_backbone is not set in the config. Please set it to True or False.') # We just take the final layer by default. This matches the default for the transformers models. snake_case_ = config.out_indices if getattr(lowerCAmelCase__, 'out_indices', lowerCAmelCase__) is not None else (-1,) snake_case_ = timm.create_model( config.backbone, pretrained=lowerCAmelCase__, features_only=config.features_only, in_chans=config.num_channels, out_indices=lowerCAmelCase__, **lowerCAmelCase__, ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
snake_case_ = self._backbone.return_layers snake_case_ = {layer['module']: str(lowerCAmelCase__) for i, layer in enumerate(self._backbone.feature_info.info)} super()._init_backbone(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__, *lowerCAmelCase__, **lowerCAmelCase__) -> Optional[Any]: requires_backends(cls, ['vision', 'timm']) from ...models.timm_backbone import TimmBackboneConfig snake_case_ = kwargs.pop('config', TimmBackboneConfig()) snake_case_ = kwargs.pop('use_timm_backbone', lowerCAmelCase__) if not use_timm: raise ValueError('use_timm_backbone must be True for timm backbones') snake_case_ = kwargs.pop('num_channels', config.num_channels) snake_case_ = kwargs.pop('features_only', config.features_only) snake_case_ = kwargs.pop('use_pretrained_backbone', config.use_pretrained_backbone) snake_case_ = kwargs.pop('out_indices', config.out_indices) snake_case_ = TimmBackboneConfig( backbone=lowerCAmelCase__, num_channels=lowerCAmelCase__, features_only=lowerCAmelCase__, use_pretrained_backbone=lowerCAmelCase__, out_indices=lowerCAmelCase__, ) return super()._from_config(lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__) -> Tuple: pass def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[BackboneOutput, Tuple[Tensor, ...]]: snake_case_ = return_dict if return_dict is not None else self.config.use_return_dict snake_case_ = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) snake_case_ = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError('Cannot output attentions for timm backbones at the moment') if output_hidden_states: # We modify the return layers to include all the stages of the backbone snake_case_ = self._all_layers snake_case_ = self._backbone(lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = self._return_layers snake_case_ = tuple(hidden_states[i] for i in self.out_indices) else: snake_case_ = self._backbone(lowerCAmelCase__, **lowerCAmelCase__) snake_case_ = None snake_case_ = tuple(lowerCAmelCase__) snake_case_ = tuple(lowerCAmelCase__) if hidden_states is not None else None if not return_dict: snake_case_ = (feature_maps,) if output_hidden_states: snake_case_ = output + (hidden_states,) return output return BackboneOutput(feature_maps=lowerCAmelCase__, hidden_states=lowerCAmelCase__, attentions=lowerCAmelCase__)
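# A hedged usage sketch for the wrapper above; it assumes `timm` and `torch` are
# installed and uses the upstream names TimmBackbone / TimmBackboneConfig for these
# classes. use_pretrained_backbone=False keeps the sketch offline-friendly.
import torch
from transformers import TimmBackbone, TimmBackboneConfig

config = TimmBackboneConfig(backbone="resnet18", use_pretrained_backbone=False, out_indices=(-1,))
backbone = TimmBackbone(config)
outputs = backbone(torch.randn(1, 3, 224, 224))
print([fm.shape for fm in outputs.feature_maps])  # one feature map per entry in out_indices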
312
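The TimmBackbone wrapper in the blob above ultimately reduces to a single timm call with features_only=True. A minimal sketch of that call, assuming timm is installed and supports negative out_indices (as the wrapper relies on); 'resnet18' is an illustrative model name:

import timm
import torch

# features_only=True returns intermediate feature maps instead of logits;
# out_indices=(-1,) keeps only the final stage, matching the wrapper's default.
model = timm.create_model(
    "resnet18",
    pretrained=False,   # skip the weight download for this sketch
    features_only=True,
    out_indices=(-1,),
)
features = model(torch.randn(1, 3, 224, 224))
print([tuple(f.shape) for f in features])  # one feature map from the last stage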
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCamelCase ( metaclass=lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["keras_nlp"] def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> int: requires_backends(self, ['keras_nlp'])
312
1
"""simple docstring""" import os def UpperCAmelCase ( UpperCAmelCase = "input.txt" ) -> int: with open(os.path.join(os.path.dirname(UpperCAmelCase ) , UpperCAmelCase ) ) as input_file: snake_case_ = [ [int(UpperCAmelCase ) for element in line.split(',' )] for line in input_file.readlines() ] snake_case_ = len(UpperCAmelCase ) snake_case_ = len(matrix[0] ) snake_case_ = [[-1 for _ in range(UpperCAmelCase )] for _ in range(UpperCAmelCase )] for i in range(UpperCAmelCase ): snake_case_ = matrix[i][0] for j in range(1 , UpperCAmelCase ): for i in range(UpperCAmelCase ): snake_case_ = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , UpperCAmelCase ): snake_case_ = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): snake_case_ = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F"""{solution() = }""")
312
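The blob above is Project Euler 82: a column-by-column relaxation for the minimal path sum when moves are right, up, or down. The same recurrence with readable names (the matrix literal is illustrative):

def minimal_path_sum(matrix):
    # Moving right, up, or down: process the matrix column by column.
    rows = len(matrix)
    best = [row[0] for row in matrix]  # cost to reach each cell of column 0
    for j in range(1, len(matrix[0])):
        # enter each cell from the left, then relax in both vertical directions
        best = [best[i] + matrix[i][j] for i in range(rows)]
        for i in range(1, rows):                  # top-to-bottom relaxation
            best[i] = min(best[i], best[i - 1] + matrix[i][j])
        for i in range(rows - 2, -1, -1):         # bottom-to-top relaxation
            best[i] = min(best[i], best[i + 1] + matrix[i][j])
    return min(best)

print(minimal_path_sum([[131, 673, 234], [201, 96, 342], [630, 803, 746]]))  # 639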
"""simple docstring""" import os import numpy import onnx def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = a.name snake_case_ = b.name snake_case_ = '' snake_case_ = '' snake_case_ = a == b snake_case_ = name_a snake_case_ = name_b return res def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase , UpperCAmelCase ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase , UpperCAmelCase ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Union[str, Any]: for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Any: snake_case_ = list(model.graph.initializer ) snake_case_ = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i snake_case_ = inits[i].name snake_case_ = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase , UpperCAmelCase ) def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = os.path.dirname(UpperCAmelCase ) snake_case_ = os.path.basename(UpperCAmelCase ) snake_case_ = onnx.load(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) snake_case_ = list(model.graph.initializer ) snake_case_ = set() snake_case_ = {} snake_case_ = [] snake_case_ = 0 for i in range(len(UpperCAmelCase ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase ) dup_set.add(UpperCAmelCase ) snake_case_ = inits[j].data_type snake_case_ = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 11: mem_size *= 8 else: print('unexpected data type: ' , UpperCAmelCase ) total_reduced_size += mem_size snake_case_ = inits[i].name snake_case_ = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase ) else: snake_case_ = [name_j] ind_to_replace.append((j, i) ) print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' ) snake_case_ = sorted(UpperCAmelCase ) _remove_dup_initializers_from_model(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'optimized_' + model_file_name snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) onnx.save(UpperCAmelCase , UpperCAmelCase ) return new_model
312
1
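The deduplication pass in the second blob above compares two TensorProto initializers by blanking their names before testing protobuf equality. The same trick in isolation, assuming onnx is installed (the tensors are built inline for illustration):

import numpy as np
from onnx import numpy_helper

def tensors_equal_ignoring_name(a, b):
    # Temporarily clear both names, compare the protobuf messages, restore.
    name_a, name_b = a.name, b.name
    a.name = b.name = ""
    equal = (a == b)
    a.name, b.name = name_a, name_b
    return equal

t1 = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w1")
t2 = numpy_helper.from_array(np.ones((2, 2), dtype=np.float32), name="w2")
print(tensors_equal_ignoring_name(t1, t2))  # True: same data, names differ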
"""simple docstring""" from __future__ import annotations from collections import deque from collections.abc import Iterator from dataclasses import dataclass @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = 42 SCREAMING_SNAKE_CASE_ = 42 class UpperCamelCase : def __init__( self, lowerCAmelCase__) -> Any: snake_case_ = [[] for _ in range(lowerCAmelCase__)] snake_case_ = size def __getitem__( self, lowerCAmelCase__) -> Iterator[Edge]: return iter(self._graph[vertex]) @property def a_ ( self) -> Dict: return self._size def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) -> Dict: if weight not in (0, 1): raise ValueError('Edge weight must be either 0 or 1.') if to_vertex < 0 or to_vertex >= self.size: raise ValueError('Vertex indexes must be in [0; size).') self._graph[from_vertex].append(Edge(lowerCAmelCase__, lowerCAmelCase__)) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> int | None: snake_case_ = deque([start_vertex]) snake_case_ = [None] * self.size snake_case_ = 0 while queue: snake_case_ = queue.popleft() snake_case_ = distances[current_vertex] if current_distance is None: continue for edge in self[current_vertex]: snake_case_ = current_distance + edge.weight snake_case_ = distances[edge.destination_vertex] if ( isinstance(lowerCAmelCase__, lowerCAmelCase__) and new_distance >= dest_vertex_distance ): continue snake_case_ = new_distance if edge.weight == 0: queue.appendleft(edge.destination_vertex) else: queue.append(edge.destination_vertex) if distances[finish_vertex] is None: raise ValueError('No path from start_vertex to finish_vertex.') return distances[finish_vertex] if __name__ == "__main__": import doctest doctest.testmod()
312
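The graph class above implements 0-1 BFS: a deque replaces Dijkstra's priority queue because edge weights are restricted to 0 and 1. A compact sketch over plain adjacency lists (the sample graph is illustrative):

from collections import deque

def zero_one_bfs(adj, start, goal):
    # adj[v] is a list of (neighbor, weight) pairs, weight in {0, 1}.
    dist = [None] * len(adj)
    dist[start] = 0
    queue = deque([start])
    while queue:
        v = queue.popleft()
        for u, w in adj[v]:
            d = dist[v] + w
            if dist[u] is None or d < dist[u]:
                dist[u] = d
                # 0-edges go to the front, 1-edges to the back, so vertices
                # leave the deque in nondecreasing distance order.
                if w == 0:
                    queue.appendleft(u)
                else:
                    queue.append(u)
    return dist[goal]

adj = [[(1, 0), (2, 1)], [(2, 0)], []]
print(zero_one_bfs(adj, 0, 2))  # 0, via the two zero-weight edges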
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: __UpperCamelCase = None __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = {'''vocab_file''': '''sentencepiece.bpe.model''', '''tokenizer_file''': '''tokenizer.json'''} __UpperCamelCase = { '''vocab_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model''' ), }, '''tokenizer_file''': { '''moussaKam/mbarthez''': '''https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json''', '''moussaKam/barthez''': '''https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json''', '''moussaKam/barthez-orangesum-title''': ( '''https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json''' ), }, } __UpperCamelCase = { '''moussaKam/mbarthez''': 1024, '''moussaKam/barthez''': 1024, '''moussaKam/barthez-orangesum-title''': 1024, } __UpperCamelCase = '''▁''' class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ = ["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ = BarthezTokenizer def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__="<s>", lowerCAmelCase__="</s>", lowerCAmelCase__="</s>", lowerCAmelCase__="<s>", lowerCAmelCase__="<unk>", lowerCAmelCase__="<pad>", lowerCAmelCase__="<mask>", **lowerCAmelCase__, ) -> List[str]: # Mask token behave like a normal word, i.e. 
include the space before it snake_case_ = AddedToken(lowerCAmelCase__, lstrip=lowerCAmelCase__, rstrip=lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) else mask_token super().__init__( lowerCAmelCase__, tokenizer_file=lowerCAmelCase__, bos_token=lowerCAmelCase__, eos_token=lowerCAmelCase__, unk_token=lowerCAmelCase__, sep_token=lowerCAmelCase__, cls_token=lowerCAmelCase__, pad_token=lowerCAmelCase__, mask_token=lowerCAmelCase__, **lowerCAmelCase__, ) snake_case_ = vocab_file snake_case_ = False if not self.vocab_file else True def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> List[int]: snake_case_ = [self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep) * [0] def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( 'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ' 'tokenizer.') if not os.path.isdir(lowerCAmelCase__): logger.error(f'Vocabulary path ({save_directory}) should be a directory') return snake_case_ = os.path.join( lowerCAmelCase__, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(lowerCAmelCase__): copyfile(self.vocab_file, lowerCAmelCase__) return (out_vocab_file,)
312
1
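The fast tokenizer in the row above frames inputs RoBERTa-style: <s> A </s> for one sequence and <s> A </s></s> B </s> for a pair. The framing logic in isolation, with illustrative token ids (0 for <s>, 2 for </s>):

def build_inputs_with_special_tokens(ids_a, ids_b=None, cls_id=0, sep_id=2):
    # Single sequence: <s> A </s>; pair: <s> A </s></s> B </s>
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

print(build_inputs_with_special_tokens([11, 12]))        # [0, 11, 12, 2]
print(build_inputs_with_special_tokens([11, 12], [13]))  # [0, 11, 12, 2, 2, 13, 2]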
"""simple docstring""" from math import ceil def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Tuple: snake_case_ = list(range(0 , UpperCAmelCase ) ) snake_case_ = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check snake_case_ = [] for i in device_map_blocks: if device_map_blocks.count(UpperCAmelCase ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(UpperCAmelCase ) # Missing blocks snake_case_ = [i for i in blocks if i not in device_map_blocks] snake_case_ = [i for i in device_map_blocks if i not in blocks] if len(UpperCAmelCase ) != 0: raise ValueError( 'Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.' ' These attention blocks were specified more than once: ' + str(UpperCAmelCase ) ) if len(UpperCAmelCase ) != 0: raise ValueError( 'There are attention blocks for this model that are not specified in the device_map. Add these attention ' 'blocks to a device on the device_map: ' + str(UpperCAmelCase ) ) if len(UpperCAmelCase ) != 0: raise ValueError( 'The device_map contains more attention blocks than this model has. Remove these from the device_map:' + str(UpperCAmelCase ) ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> str: snake_case_ = list(range(UpperCAmelCase ) ) snake_case_ = int(ceil(n_layers / len(UpperCAmelCase ) ) ) snake_case_ = [layers[i : i + n_blocks] for i in range(0 , UpperCAmelCase , UpperCAmelCase )] return dict(zip(UpperCAmelCase , UpperCAmelCase ) )
312
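The second helper above chops n_layers into nearly equal contiguous blocks, one per device. The same ceil-based chunking with plain names:

from math import ceil

def get_device_map(n_layers, devices):
    # Split layer indices into len(devices) nearly equal contiguous blocks.
    layers = list(range(n_layers))
    per_device = int(ceil(n_layers / len(devices)))
    blocks = [layers[i : i + per_device] for i in range(0, n_layers, per_device)]
    return dict(zip(devices, blocks))

print(get_device_map(10, [0, 1, 2]))
# {0: [0, 1, 2, 3], 1: [4, 5, 6, 7], 2: [8, 9]}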
"""simple docstring""" import functools def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> int: # Validation if not isinstance(UpperCAmelCase , UpperCAmelCase ) or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for day in days ): raise ValueError('The parameter days should be a list of integers' ) if len(UpperCAmelCase ) != 3 or not all(isinstance(UpperCAmelCase , UpperCAmelCase ) for cost in costs ): raise ValueError('The parameter costs should be a list of three integers' ) if len(UpperCAmelCase ) == 0: return 0 if min(UpperCAmelCase ) <= 0: raise ValueError('All days elements should be greater than 0' ) if max(UpperCAmelCase ) >= 366: raise ValueError('All days elements should be less than 366' ) snake_case_ = set(UpperCAmelCase ) @functools.cache def dynamic_programming(UpperCAmelCase ) -> int: if index > 365: return 0 if index not in days_set: return dynamic_programming(index + 1 ) return min( costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , ) return dynamic_programming(1 ) if __name__ == "__main__": import doctest doctest.testmod()
312
1
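The second blob in the row above is the classic minimum-cost-tickets recurrence over 1-, 7-, and 30-day passes, memoized with functools.cache. A readable sketch with illustrative inputs:

import functools

def minimum_cost(days, costs):
    # costs = [1-day pass, 7-day pass, 30-day pass]
    travel = set(days)

    @functools.cache
    def best_from(day):
        if day > 365:
            return 0
        if day not in travel:
            return best_from(day + 1)  # no travel today: defer any purchase
        return min(
            costs[0] + best_from(day + 1),
            costs[1] + best_from(day + 7),
            costs[2] + best_from(day + 30),
        )

    return best_from(1)

print(minimum_cost([1, 4, 6, 7, 8, 20], [2, 7, 15]))  # 11: a 7-day pass plus singles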
"""simple docstring""" import importlib import sys from argparse import REMAINDER, ArgumentParser from pathlib import Path import torch_xla.distributed.xla_multiprocessing as xmp def UpperCAmelCase ( ) -> Dict: snake_case_ = ArgumentParser( description=( 'PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes' ) ) # Optional arguments for the launch helper parser.add_argument('--num_cores' , type=UpperCAmelCase , default=1 , help='Number of TPU cores to use (1 or 8).' ) # positional parser.add_argument( 'training_script' , type=UpperCAmelCase , help=( 'The full path to the single TPU training ' 'program/script to be launched in parallel, ' 'followed by all the arguments for the ' 'training script' ) , ) # rest from the training program parser.add_argument('training_script_args' , nargs=UpperCAmelCase ) return parser.parse_args() def UpperCAmelCase ( ) -> Tuple: snake_case_ = parse_args() # Import training_script as a module. snake_case_ = Path(args.training_script ) sys.path.append(str(script_fpath.parent.resolve() ) ) snake_case_ = script_fpath.stem snake_case_ = importlib.import_module(UpperCAmelCase ) # Patch sys.argv snake_case_ = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )] xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores ) if __name__ == "__main__": main()
312
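The TPU launcher above collects everything after the script path with argparse's REMAINDER sentinel and forwards it untouched. The pattern in miniature (argument values are illustrative):

from argparse import ArgumentParser, REMAINDER

parser = ArgumentParser(description="toy launcher")
parser.add_argument("--num_cores", type=int, default=1)
parser.add_argument("training_script", type=str)
parser.add_argument("training_script_args", nargs=REMAINDER)  # grab the rest verbatim

args = parser.parse_args(["--num_cores", "8", "train.py", "--lr", "3e-5"])
print(args.num_cores, args.training_script, args.training_script_args)
# 8 train.py ['--lr', '3e-5']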
"""simple docstring""" import copy import re class UpperCamelCase : SCREAMING_SNAKE_CASE_ = "hp" SCREAMING_SNAKE_CASE_ = {} SCREAMING_SNAKE_CASE_ = None @classmethod def a_ ( cls, lowerCAmelCase__, lowerCAmelCase__) -> Tuple: snake_case_ = prefix snake_case_ = defaults cls.build_naming_info() @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Optional[Any]: if len(lowerCAmelCase__) == 0: return "" snake_case_ = None if any(char.isdigit() for char in word): raise Exception(f'Parameters should not contain numbers: \'{word}\' contains a number') if word in info["short_word"]: return info["short_word"][word] for prefix_len in range(1, len(lowerCAmelCase__) + 1): snake_case_ = word[:prefix_len] if prefix in info["reverse_short_word"]: continue else: snake_case_ = prefix break if short_word is None: # Paranoid fallback def int_to_alphabetic(lowerCAmelCase__): snake_case_ = '' while integer != 0: snake_case_ = chr(ord('A') + integer % 10) + s integer //= 10 return s snake_case_ = 0 while True: snake_case_ = word + '#' + int_to_alphabetic(lowerCAmelCase__) if sword in info["reverse_short_word"]: continue else: snake_case_ = sword break snake_case_ = short_word snake_case_ = word return short_word @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = param_name.split('_') snake_case_ = [TrialShortNamer.shortname_for_word(lowerCAmelCase__, lowerCAmelCase__) for word in words] # We try to create a separatorless short name, but if there is a collision we have to fallback # to a separated short name snake_case_ = ['', '_'] for separator in separators: snake_case_ = separator.join(lowerCAmelCase__) if shortname not in info["reverse_short_param"]: snake_case_ = shortname snake_case_ = param_name return shortname return param_name @staticmethod def a_ ( lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: snake_case_ = TrialShortNamer.shortname_for_key(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = short_name snake_case_ = param_name @classmethod def a_ ( cls) -> List[str]: if cls.NAMING_INFO is not None: return snake_case_ = { 'short_word': {}, 'reverse_short_word': {}, 'short_param': {}, 'reverse_short_param': {}, } snake_case_ = list(cls.DEFAULTS.keys()) for k in field_keys: cls.add_new_param_name(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = info @classmethod def a_ ( cls, lowerCAmelCase__) -> List[Any]: cls.build_naming_info() assert cls.PREFIX is not None snake_case_ = [copy.copy(cls.PREFIX)] for k, v in params.items(): if k not in cls.DEFAULTS: raise Exception(f'You should provide a default value for the param name {k} with value {v}') if v == cls.DEFAULTS[k]: # The default value is not added to the name continue snake_case_ = cls.NAMING_INFO['short_param'][k] if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = 1 if v else 0 snake_case_ = '' if isinstance(lowerCAmelCase__, (int, float)) else '-' snake_case_ = f'{key}{sep}{v}' name.append(lowerCAmelCase__) return "_".join(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__) -> Optional[Any]: snake_case_ = repr[len(cls.PREFIX) + 1 :] if repr == "": snake_case_ = [] else: snake_case_ = repr.split('_') snake_case_ = {} for value in values: if "-" in value: snake_case_ , snake_case_ = value.split('-') else: snake_case_ = re.sub('[0-9.]', '', lowerCAmelCase__) snake_case_ = float(re.sub('[^0-9.]', '', lowerCAmelCase__)) snake_case_ = cls.NAMING_INFO['reverse_short_param'][p_k] snake_case_ = p_v for k in cls.DEFAULTS: if k not in parameters: snake_case_ = cls.DEFAULTS[k] return parameters
312
1
"""simple docstring""" from typing import Dict, Iterable, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract __UpperCamelCase = logging.get_logger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> List[str]: return [ int(1000 * (box[0] / width) ), int(1000 * (box[1] / height) ), int(1000 * (box[2] / width) ), int(1000 * (box[3] / height) ), ] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> str: snake_case_ = to_pil_image(UpperCAmelCase ) snake_case_ , snake_case_ = pil_image.size snake_case_ = pytesseract.image_to_data(UpperCAmelCase , lang=UpperCAmelCase , output_type='dict' , config=UpperCAmelCase ) snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ = data['text'], data['left'], data['top'], data['width'], data['height'] # filter empty words and corresponding coordinates snake_case_ = [idx for idx, word in enumerate(UpperCAmelCase ) if not word.strip()] snake_case_ = [word for idx, word in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices] snake_case_ = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices] snake_case_ = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices] snake_case_ = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices] snake_case_ = [coord for idx, coord in enumerate(UpperCAmelCase ) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format snake_case_ = [] for x, y, w, h in zip(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): snake_case_ = [x, y, x + w, y + h] actual_boxes.append(UpperCAmelCase ) # finally, normalize the bounding boxes snake_case_ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) assert len(UpperCAmelCase ) == len(UpperCAmelCase ), "Not as many words as there are bounding boxes" return words, normalized_boxes class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = ["pixel_values"] def __init__( self, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = PILImageResampling.BILINEAR, lowerCAmelCase__ = True, lowerCAmelCase__ = 1 / 255, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = True, lowerCAmelCase__ = None, lowerCAmelCase__ = "", **lowerCAmelCase__, ) -> None: super().__init__(**lowerCAmelCase__) snake_case_ = size if size is not None else {'height': 224, 'width': 224} snake_case_ = get_size_dict(lowerCAmelCase__) snake_case_ = do_resize snake_case_ = size snake_case_ = resample snake_case_ = do_rescale snake_case_ = rescale_value snake_case_ = do_normalize snake_case_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN snake_case_ = image_std if image_std is not None else IMAGENET_STANDARD_STD snake_case_ = apply_ocr snake_case_ = ocr_lang snake_case_ = tesseract_config def a_ ( self, 
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = PILImageResampling.BILINEAR, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: snake_case_ = get_size_dict(lowerCAmelCase__) if "height" not in size or "width" not in size: raise ValueError(f'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}') snake_case_ = (size['height'], size['width']) return resize(lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: return rescale(lowerCAmelCase__, scale=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ = None, **lowerCAmelCase__, ) -> np.ndarray: return normalize(lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__, data_format=lowerCAmelCase__, **lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__=None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__ = ChannelDimension.FIRST, **lowerCAmelCase__, ) -> PIL.Image.Image: snake_case_ = do_resize if do_resize is not None else self.do_resize snake_case_ = size if size is not None else self.size snake_case_ = get_size_dict(lowerCAmelCase__) snake_case_ = resample if resample is not None else self.resample snake_case_ = do_rescale if do_rescale is not None else self.do_rescale snake_case_ = rescale_factor if rescale_factor is not None else self.rescale_factor snake_case_ = do_normalize if do_normalize is not None else self.do_normalize snake_case_ = image_mean if image_mean is not None else self.image_mean snake_case_ = image_std if image_std is not None else self.image_std snake_case_ = apply_ocr if apply_ocr is not None else self.apply_ocr snake_case_ = ocr_lang if ocr_lang is not None else self.ocr_lang snake_case_ = tesseract_config if tesseract_config is not None else self.tesseract_config snake_case_ = make_list_of_images(lowerCAmelCase__) if not valid_images(lowerCAmelCase__): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.') if do_resize and size is None: raise ValueError('Size must be specified if do_resize is True.') if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.') if do_normalize and (image_mean is None or image_std is None): raise ValueError('If do_normalize is True, image_mean and image_std must be specified.') # All transformations expect numpy arrays. 
snake_case_ = [to_numpy_array(lowerCAmelCase__) for image in images] # Tesseract OCR to get words + normalized bounding boxes if apply_ocr: requires_backends(self, 'pytesseract') snake_case_ = [] snake_case_ = [] for image in images: snake_case_ , snake_case_ = apply_tesseract(lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__) words_batch.append(lowerCAmelCase__) boxes_batch.append(lowerCAmelCase__) if do_resize: snake_case_ = [self.resize(image=lowerCAmelCase__, size=lowerCAmelCase__, resample=lowerCAmelCase__) for image in images] if do_rescale: snake_case_ = [self.rescale(image=lowerCAmelCase__, scale=lowerCAmelCase__) for image in images] if do_normalize: snake_case_ = [self.normalize(image=lowerCAmelCase__, mean=lowerCAmelCase__, std=lowerCAmelCase__) for image in images] snake_case_ = [to_channel_dimension_format(lowerCAmelCase__, lowerCAmelCase__) for image in images] snake_case_ = BatchFeature(data={'pixel_values': images}, tensor_type=lowerCAmelCase__) if apply_ocr: snake_case_ = words_batch snake_case_ = boxes_batch return data
312
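The normalize_box helper in the processor above rescales pixel boxes onto the 0-1000 grid that LayoutLM-style models expect, making boxes comparable across image sizes. In isolation:

def normalize_box(box, width, height):
    # Map a (left, top, right, bottom) pixel box onto a 0-1000 grid.
    return [
        int(1000 * box[0] / width),
        int(1000 * box[1] / height),
        int(1000 * box[2] / width),
        int(1000 * box[3] / height),
    ]

print(normalize_box([100, 50, 300, 150], width=600, height=400))
# [166, 125, 500, 375]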
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( 'files' , [ ['full:README.md', 'dataset_infos.json'], ['empty:README.md', 'dataset_infos.json'], ['dataset_infos.json'], ['full:README.md'], ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = tmp_path_factory.mktemp('dset_infos_dir' ) if "full:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('---\ndataset_info:\n dataset_size: 42\n---' ) if "empty:README.md" in files: with open(dataset_infos_dir / 'README.md' , 'w' ) as f: f.write('' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / 'dataset_infos.json' , 'w' ) as f: f.write('{"default": {"dataset_size": 42}}' ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( 'dataset_info' , [ DatasetInfo(), DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: snake_case_ = str(UpperCAmelCase ) dataset_info.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfo.from_directory(UpperCAmelCase ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCAmelCase , 'dataset_info.json' ) ) def UpperCAmelCase ( ) -> Union[str, Any]: snake_case_ = DatasetInfo( description='foo' , citation='bar' , homepage='https://foo.bar' , license='CC0' , features=Features({'a': Value('int32' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train', 'num_examples': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) snake_case_ = dataset_info._to_yaml_dict() assert sorted(UpperCAmelCase ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) snake_case_ = yaml.safe_dump(UpperCAmelCase ) snake_case_ = yaml.safe_load(UpperCAmelCase ) assert dataset_info_yaml_dict == reloaded def UpperCAmelCase ( ) -> Optional[Any]: snake_case_ = DatasetInfo() snake_case_ = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( 'dataset_infos_dict' , [ DatasetInfosDict(), DatasetInfosDict({'default': DatasetInfo()} ), DatasetInfosDict({'my_config_name': DatasetInfo()} ), DatasetInfosDict( { 'default': DatasetInfo( description='foo' , features=Features({'a': Value('int32' )} ) , builder_name='builder' , config_name='config' , version='1.0.0' , splits=[{'name': 'train'}] , download_size=42 , ) } ), DatasetInfosDict( { 'v1': DatasetInfo(dataset_size=42 ), 'v2': DatasetInfo(dataset_size=1337 ), } ), ] , ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[str]: snake_case_ = str(UpperCAmelCase ) dataset_infos_dict.write_to_directory(UpperCAmelCase ) snake_case_ = DatasetInfosDict.from_directory(UpperCAmelCase ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in 
dataset_infos_dict.items(): snake_case_ = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml snake_case_ = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCAmelCase , 'README.md' ) )
312
1
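A minimal version of the write/read round trip the tests above exercise, assuming the datasets library is installed; the field values are illustrative:

import tempfile
from datasets import DatasetInfo, Features, Value

info = DatasetInfo(description="toy", features=Features({"a": Value("int32")}))
with tempfile.TemporaryDirectory() as tmp_dir:
    info.write_to_directory(tmp_dir)             # writes dataset_info.json
    reloaded = DatasetInfo.from_directory(tmp_dir)
print(reloaded.description)  # 'toy'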
"""simple docstring""" from math import isqrt def UpperCAmelCase ( UpperCAmelCase ) -> list[int]: snake_case_ = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCAmelCase , UpperCAmelCase ): snake_case_ = False return [i for i in range(2 , UpperCAmelCase ) if is_prime[i]] def UpperCAmelCase ( UpperCAmelCase = 10**8 ) -> int: snake_case_ = calculate_prime_numbers(max_number // 2 ) snake_case_ = 0 snake_case_ = 0 snake_case_ = len(UpperCAmelCase ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(F"""{solution() = }""")
312
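The blob above is Project Euler 187: count numbers below a limit with exactly two prime factors, using a two-pointer sweep over a prime list. The same idea with readable names and a tiny limit for illustration:

from math import isqrt

def primes_below(n):
    sieve = [True] * n
    sieve[0:2] = [False, False]
    for i in range(2, isqrt(n - 1) + 1):
        if sieve[i]:
            sieve[i * i :: i] = [False] * len(sieve[i * i :: i])
    return [i for i, is_p in enumerate(sieve) if is_p]

def count_semiprimes(limit):
    # Count n < limit with exactly two prime factors p <= q.
    primes = primes_below(limit // 2 + 1)
    count, left, right = 0, 0, len(primes) - 1
    while left <= right:
        while primes[left] * primes[right] >= limit:
            right -= 1
        count += right - left + 1  # every q in primes[left..right] pairs with p
        left += 1
    return count

print(count_semiprimes(30))  # 10: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26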
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = StableDiffusionInpaintPipeline SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS SCREAMING_SNAKE_CASE_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE_ = frozenset([] ) def a_ ( self) -> Any: torch.manual_seed(0) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D'), up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D'), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=lowerCAmelCase__, ) snake_case_ = PNDMScheduler(skip_prk_steps=lowerCAmelCase__) torch.manual_seed(0) snake_case_ = AutoencoderKL( block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'], up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'], latent_channels=4, sample_size=128, ) torch.manual_seed(0) snake_case_ = CLIPTextConfig( bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act='gelu', projection_dim=512, ) snake_case_ = CLIPTextModel(lowerCAmelCase__) snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip') snake_case_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=0) -> List[str]: # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched snake_case_ = floats_tensor((1, 3, 32, 32), rng=random.Random(lowerCAmelCase__)).to(lowerCAmelCase__) snake_case_ = image.cpu().permute(0, 2, 3, 1)[0] snake_case_ = Image.fromarray(np.uinta(lowerCAmelCase__)).convert('RGB').resize((64, 64)) snake_case_ = Image.fromarray(np.uinta(image + 4)).convert('RGB').resize((64, 64)) if str(lowerCAmelCase__).startswith('mps'): snake_case_ = torch.manual_seed(lowerCAmelCase__) else: snake_case_ = torch.Generator(device=lowerCAmelCase__).manual_seed(lowerCAmelCase__) snake_case_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': init_image, 'mask_image': mask_image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'output_type': 'numpy', } return inputs def a_ ( self) -> Dict: snake_case_ = 'cpu' # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = 
StableDiffusionInpaintPipeline(**lowerCAmelCase__) snake_case_ = sd_pipe.to(lowerCAmelCase__) sd_pipe.set_progress_bar_config(disable=lowerCAmelCase__) snake_case_ = self.get_dummy_inputs(lowerCAmelCase__) snake_case_ = sd_pipe(**lowerCAmelCase__).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) snake_case_ = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476]) assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2 def a_ ( self) -> Union[str, Any]: super().test_inference_batch_single_identical(expected_max_diff=3e-3) @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def a_ ( self) -> Optional[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def a_ ( self) -> Union[str, Any]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained(lowerCAmelCase__, safety_checker=lowerCAmelCase__) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 9e-3 def a_ ( self) -> Optional[int]: snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint' '/yellow_cat_sitting_on_a_park_bench_fp16.npy') snake_case_ = 'stabilityai/stable-diffusion-2-inpainting' snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, torch_dtype=torch.floataa, safety_checker=lowerCAmelCase__, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, output_type='np', ) snake_case_ = output.images[0] assert image.shape == (512, 512, 3) assert np.abs(expected_image - image).max() < 5e-1 def a_ ( self) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/sd2-inpaint/init_image.png') snake_case_ = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png') snake_case_ = 
'stabilityai/stable-diffusion-2-inpainting' snake_case_ = PNDMScheduler.from_pretrained(lowerCAmelCase__, subfolder='scheduler') snake_case_ = StableDiffusionInpaintPipeline.from_pretrained( lowerCAmelCase__, safety_checker=lowerCAmelCase__, scheduler=lowerCAmelCase__, torch_dtype=torch.floataa, ) pipe.to(lowerCAmelCase__) pipe.set_progress_bar_config(disable=lowerCAmelCase__) pipe.enable_attention_slicing(1) pipe.enable_sequential_cpu_offload() snake_case_ = 'Face of a yellow cat, high resolution, sitting on a park bench' snake_case_ = torch.manual_seed(0) snake_case_ = pipe( prompt=lowerCAmelCase__, image=lowerCAmelCase__, mask_image=lowerCAmelCase__, generator=lowerCAmelCase__, num_inference_steps=2, output_type='np', ) snake_case_ = torch.cuda.max_memory_allocated() # make sure that less than 2.65 GB is allocated assert mem_bytes < 2.65 * 10**9
312
1
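A minimal sketch of the inpainting call the slow tests above drive, assuming diffusers is installed, the checkpoint can be downloaded, and a CUDA device is available; the image paths are illustrative placeholders:

import torch
from diffusers import StableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInpaintPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
)
pipe.to("cuda")

init_image = load_image("init_image.png")  # illustrative local paths
mask_image = load_image("mask.png")        # white pixels are repainted

result = pipe(
    prompt="Face of a yellow cat, high resolution, sitting on a park bench",
    image=init_image,
    mask_image=mask_image,
).images[0]
result.save("inpainted.png")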
"""simple docstring""" import uuid from typing import Any, Dict, List, Optional, Union from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf if is_torch_available(): import torch __UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase : def __init__( self, lowerCAmelCase__ = None, lowerCAmelCase__ = None, lowerCAmelCase__=None, lowerCAmelCase__=None) -> Optional[Any]: if not conversation_id: snake_case_ = uuid.uuida() if past_user_inputs is None: snake_case_ = [] if generated_responses is None: snake_case_ = [] snake_case_ = conversation_id snake_case_ = past_user_inputs snake_case_ = generated_responses snake_case_ = text def __eq__( self, lowerCAmelCase__) -> List[str]: if not isinstance(lowerCAmelCase__, lowerCAmelCase__): return False if self.uuid == other.uuid: return True return ( self.new_user_input == other.new_user_input and self.past_user_inputs == other.past_user_inputs and self.generated_responses == other.generated_responses ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__ = False) -> Any: if self.new_user_input: if overwrite: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" was overwritten ' f'with: "{text}".') snake_case_ = text else: logger.warning( f'User input added while unprocessed input was existing: "{self.new_user_input}" new input ' f'ignored: "{text}". Set `overwrite` to True to overwrite unprocessed user input') else: snake_case_ = text def a_ ( self) -> List[str]: if self.new_user_input: self.past_user_inputs.append(self.new_user_input) snake_case_ = None def a_ ( self, lowerCAmelCase__) -> List[str]: self.generated_responses.append(lowerCAmelCase__) def a_ ( self) -> int: for user_input, generated_response in zip(self.past_user_inputs, self.generated_responses): yield True, user_input yield False, generated_response if self.new_user_input: yield True, self.new_user_input def __repr__( self) -> Optional[int]: snake_case_ = f'Conversation id: {self.uuid} \n' for is_user, text in self.iter_texts(): snake_case_ = 'user' if is_user else 'bot' output += f'{name} >> {text} \n' return output @add_end_docstrings( lowerCAmelCase__ , R"\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n " , ) class UpperCamelCase ( lowerCAmelCase__ ): def __init__( self, *lowerCAmelCase__, **lowerCAmelCase__) -> Union[str, Any]: super().__init__(*lowerCAmelCase__, **lowerCAmelCase__) if self.tokenizer.pad_token_id is None: snake_case_ = self.tokenizer.eos_token def a_ ( self, lowerCAmelCase__=None, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__) -> Union[str, Any]: snake_case_ = {} snake_case_ = {} snake_case_ = {} if min_length_for_response is not None: snake_case_ = min_length_for_response if minimum_tokens is not None: snake_case_ = minimum_tokens if "max_length" in generate_kwargs: snake_case_ = generate_kwargs['max_length'] # self.max_length = generate_kwargs.get("max_length", self.model.config.max_length) if clean_up_tokenization_spaces is not None: snake_case_ = clean_up_tokenization_spaces if generate_kwargs: forward_params.update(lowerCAmelCase__) return preprocess_params, forward_params, postprocess_params def __call__( self, lowerCAmelCase__, lowerCAmelCase__=0, **lowerCAmelCase__) -> Union[str, 
Any]: snake_case_ = super().__call__(lowerCAmelCase__, num_workers=lowerCAmelCase__, **lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__) and len(lowerCAmelCase__) == 1: return outputs[0] return outputs def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=32) -> Dict[str, Any]: if not isinstance(lowerCAmelCase__, lowerCAmelCase__): raise ValueError('ConversationalPipeline, expects Conversation as inputs') if conversation.new_user_input is None: raise ValueError( f'Conversation with UUID {type(conversation.uuid)} does not contain new user input to process. ' 'Add user inputs with the conversation\'s `add_user_input` method') if hasattr(self.tokenizer, '_build_conversation_input_ids'): snake_case_ = self.tokenizer._build_conversation_input_ids(lowerCAmelCase__) else: # If the tokenizer cannot handle conversations, we default to only the old version snake_case_ = self._legacy_parse_and_tokenize(lowerCAmelCase__) if self.framework == "pt": snake_case_ = torch.LongTensor([input_ids]) elif self.framework == "tf": snake_case_ = tf.constant([input_ids]) return {"input_ids": input_ids, "conversation": conversation} def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=10, **lowerCAmelCase__) -> Optional[Any]: snake_case_ = generate_kwargs.get('max_length', self.model.config.max_length) snake_case_ = model_inputs['input_ids'].shape[1] if max_length - minimum_tokens < n: logger.warning(f'Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})') snake_case_ = max_length - minimum_tokens snake_case_ = model_inputs['input_ids'][:, -trim:] if "attention_mask" in model_inputs: snake_case_ = model_inputs['attention_mask'][:, -trim:] snake_case_ = model_inputs.pop('conversation') snake_case_ = max_length snake_case_ = self.model.generate(**lowerCAmelCase__, **lowerCAmelCase__) if self.model.config.is_encoder_decoder: snake_case_ = 1 else: snake_case_ = n return {"output_ids": output_ids[:, start_position:], "conversation": conversation} def a_ ( self, lowerCAmelCase__, lowerCAmelCase__=True) -> List[str]: snake_case_ = model_outputs['output_ids'] snake_case_ = self.tokenizer.decode( output_ids[0], skip_special_tokens=lowerCAmelCase__, clean_up_tokenization_spaces=lowerCAmelCase__, ) snake_case_ = model_outputs['conversation'] conversation.mark_processed() conversation.append_response(lowerCAmelCase__) return conversation def a_ ( self, lowerCAmelCase__) -> Dict: snake_case_ = self.tokenizer.eos_token_id snake_case_ = [] for is_user, text in conversation.iter_texts(): if eos_token_id is not None: input_ids.extend(self.tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) + [eos_token_id]) else: input_ids.extend(self.tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) if len(lowerCAmelCase__) > self.tokenizer.model_max_length: snake_case_ = input_ids[-self.tokenizer.model_max_length :] return input_ids
312
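A short sketch of driving the pipeline above through the high-level API, assuming a transformers version that still ships the conversational task and an illustrative conversational checkpoint:

from transformers import Conversation, pipeline

chatbot = pipeline("conversational", model="microsoft/DialoGPT-small")

conversation = Conversation("What is the capital of France?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])

# Follow-up turns reuse the same Conversation object:
conversation.add_user_input("And of Italy?")
conversation = chatbot(conversation)
print(conversation.generated_responses[-1])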
"""simple docstring""" import logging import os from dataclasses import dataclass, field from typing import Dict, Optional import numpy as np from utils_multiple_choice import MultipleChoiceDataset, Split, processors import transformers from transformers import ( AutoConfig, AutoModelForMultipleChoice, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process __UpperCamelCase = logging.getLogger(__name__) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Any: return (preds == labels).mean() @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCamelCase : SCREAMING_SNAKE_CASE_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys() )} ) SCREAMING_SNAKE_CASE_ = field(metadata={"help": "Should contain the data files for the task."} ) SCREAMING_SNAKE_CASE_ = field( default=1_2_8 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) SCREAMING_SNAKE_CASE_ = field( default=lowerCAmelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def UpperCAmelCase ( ) -> Optional[int]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) snake_case_ , snake_case_ , snake_case_ = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f'Output directory ({training_args.output_dir}) already exists and is not empty. Use' ' --overwrite_output_dir to overcome.' 
) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( 'Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('Training/evaluation parameters %s' , UpperCAmelCase ) # Set seed set_seed(training_args.seed ) try: snake_case_ = processors[data_args.task_name]() snake_case_ = processor.get_labels() snake_case_ = len(UpperCAmelCase ) except KeyError: raise ValueError('Task not found: %s' % (data_args.task_name) ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. snake_case_ = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCAmelCase , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , ) snake_case_ = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) snake_case_ = AutoModelForMultipleChoice.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=UpperCAmelCase , cache_dir=model_args.cache_dir , ) # Get datasets snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) snake_case_ = ( MultipleChoiceDataset( data_dir=data_args.data_dir , tokenizer=UpperCAmelCase , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def compute_metrics(UpperCAmelCase ) -> Dict: snake_case_ = np.argmax(p.predictions , axis=1 ) return {"acc": simple_accuracy(UpperCAmelCase , p.label_ids )} # Data collator snake_case_ = DataCollatorWithPadding(UpperCAmelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer snake_case_ = Trainer( model=UpperCAmelCase , args=UpperCAmelCase , train_dataset=UpperCAmelCase , eval_dataset=UpperCAmelCase , compute_metrics=UpperCAmelCase , data_collator=UpperCAmelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_master(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation snake_case_ = {} if training_args.do_eval: logger.info('*** Evaluate ***' ) snake_case_ = trainer.evaluate() snake_case_ = os.path.join(training_args.output_dir , 'eval_results.txt' ) if trainer.is_world_master(): with open(UpperCAmelCase , 'w' ) as 
writer: logger.info('***** Eval results *****' ) for key, value in result.items(): logger.info(' %s = %s' , UpperCAmelCase , UpperCAmelCase ) writer.write('%s = %s\n' % (key, value) ) results.update(UpperCAmelCase ) return results def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
312
1
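The training script above parses three dataclasses at once with HfArgumentParser. The mechanism in miniature, with a single illustrative dataclass:

from dataclasses import dataclass, field
from transformers import HfArgumentParser

@dataclass
class ToyArguments:
    task_name: str = field(metadata={"help": "Task to train on."})  # required flag
    max_seq_length: int = field(default=128)

parser = HfArgumentParser(ToyArguments)
(args,) = parser.parse_args_into_dataclasses(["--task_name", "swag"])
print(args.task_name, args.max_seq_length)  # swag 128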
"""simple docstring""" from ....configuration_utils import PretrainedConfig from ....utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''CarlCochet/trajectory-transformer-halfcheetah-medium-v2''': ( '''https://huggingface.co/CarlCochet/trajectory-transformer-halfcheetah-medium-v2/resolve/main/config.json''' ), # See all TrajectoryTransformer models at https://huggingface.co/models?filter=trajectory_transformer } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "trajectory_transformer" SCREAMING_SNAKE_CASE_ = ["past_key_values"] SCREAMING_SNAKE_CASE_ = { "hidden_size": "n_embd", "num_attention_heads": "n_head", "num_hidden_layers": "n_layer", } def __init__( self, lowerCAmelCase__=100, lowerCAmelCase__=5, lowerCAmelCase__=1, lowerCAmelCase__=1, lowerCAmelCase__=249, lowerCAmelCase__=6, lowerCAmelCase__=17, lowerCAmelCase__=25, lowerCAmelCase__=4, lowerCAmelCase__=4, lowerCAmelCase__=128, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.1, lowerCAmelCase__=0.0006, lowerCAmelCase__=512, lowerCAmelCase__=0.02, lowerCAmelCase__=1e-12, lowerCAmelCase__=1, lowerCAmelCase__=True, lowerCAmelCase__=1, lowerCAmelCase__=5_0256, lowerCAmelCase__=5_0256, **lowerCAmelCase__, ) -> Optional[Any]: snake_case_ = vocab_size snake_case_ = action_weight snake_case_ = reward_weight snake_case_ = value_weight snake_case_ = max_position_embeddings snake_case_ = block_size snake_case_ = action_dim snake_case_ = observation_dim snake_case_ = transition_dim snake_case_ = learning_rate snake_case_ = n_layer snake_case_ = n_head snake_case_ = n_embd snake_case_ = embd_pdrop snake_case_ = attn_pdrop snake_case_ = resid_pdrop snake_case_ = initializer_range snake_case_ = layer_norm_eps snake_case_ = kaiming_initializer_range snake_case_ = use_cache super().__init__(pad_token_id=lowerCAmelCase__, bos_token_id=lowerCAmelCase__, eos_token_id=lowerCAmelCase__, **lowerCAmelCase__)
312
"""simple docstring""" from __future__ import annotations import math def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> int: if depth < 0: raise ValueError('Depth cannot be less than 0' ) if len(UpperCAmelCase ) == 0: raise ValueError('Scores cannot be empty' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) , ) def UpperCAmelCase ( ) -> None: snake_case_ = [90, 23, 6, 33, 21, 65, 123, 34423] snake_case_ = math.log(len(UpperCAmelCase ) , 2 ) print('Optimal value : ' , end='' ) print(minimax(0 , 0 , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
312
1
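The last blob in the row above is plain game-tree minimax over a complete binary tree of leaf scores. A clean sketch using the same sample scores:

import math

def minimax(depth, node, is_max, scores, height):
    # Leaves sit at depth == height; internal nodes alternate max/min turns.
    if depth == height:
        return scores[node]
    left = minimax(depth + 1, node * 2, not is_max, scores, height)
    right = minimax(depth + 1, node * 2 + 1, not is_max, scores, height)
    return max(left, right) if is_max else min(left, right)

scores = [90, 23, 6, 33, 21, 65, 123, 34423]
height = int(math.log2(len(scores)))
print(minimax(0, 0, True, scores, height))  # 65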
"""simple docstring""" import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401 from coval.conll import reader, util from coval.eval import evaluator import datasets __UpperCamelCase = datasets.logging.get_logger(__name__) __UpperCamelCase = '''\ @InProceedings{moosavi2019minimum, author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube}, title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection}, year = {2019}, booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)}, publisher = {Association for Computational Linguistics}, address = {Florence, Italy}, } @inproceedings{10.3115/1072399.1072405, author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette}, title = {A Model-Theoretic Coreference Scoring Scheme}, year = {1995}, isbn = {1558604022}, publisher = {Association for Computational Linguistics}, address = {USA}, url = {https://doi.org/10.3115/1072399.1072405}, doi = {10.3115/1072399.1072405}, booktitle = {Proceedings of the 6th Conference on Message Understanding}, pages = {45–52}, numpages = {8}, location = {Columbia, Maryland}, series = {MUC6 ’95} } @INPROCEEDINGS{Bagga98algorithmsfor, author = {Amit Bagga and Breck Baldwin}, title = {Algorithms for Scoring Coreference Chains}, booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference}, year = {1998}, pages = {563--566} } @INPROCEEDINGS{Luo05oncoreference, author = {Xiaoqiang Luo}, title = {On coreference resolution performance metrics}, booktitle = {In Proc. of HLT/EMNLP}, year = {2005}, pages = {25--32}, publisher = {URL} } @inproceedings{moosavi-strube-2016-coreference, title = "Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric", author = "Moosavi, Nafise Sadat and Strube, Michael", booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)", month = aug, year = "2016", address = "Berlin, Germany", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/P16-1060", doi = "10.18653/v1/P16-1060", pages = "632--642", } ''' __UpperCamelCase = '''\ CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which implements of the common evaluation metrics including MUC [Vilain et al, 1995], B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005], LEA [Moosavi and Strube, 2016] and the averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) [Denis and Baldridge, 2009a; Pradhan et al., 2011]. This wrapper of CoVal currently only work with CoNLL line format: The CoNLL format has one word per line with all the annotation for this word in column separated by spaces: Column Type Description 1 Document ID This is a variation on the document filename 2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc. 3 Word number 4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release. 5 Part-of-Speech 6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. 
The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column. 7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-" 8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7. 9 Word sense This is the word sense of the word in Column 3. 10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data. 11 Named Entities These columns identifies the spans representing various named entities. 12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7. N Coreference Coreference chain information encoded in a parenthesis structure. More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md CoVal code was written by @ns-moosavi. Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py The test suite is taken from https://github.com/conll/reference-coreference-scorers/ Mention evaluation and the test suite are added by @andreasvc. Parsing CoNLL files is developed by Leo Born. ''' __UpperCamelCase = ''' Calculates coreference evaluation metrics. Args: predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format. Each prediction is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format. Each reference is a word with its annotations as a string made of columns joined with spaces. Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation) See the details on the format in the description of the metric. keep_singletons: After extracting all mentions of key or system files, mentions whose corresponding coreference chain is of size one, are considered as singletons. The default evaluation mode will include singletons in evaluations if they are included in the key or the system files. By setting \'keep_singletons=False\', all singletons in the key and system files will be excluded from the evaluation. NP_only: Most of the recent coreference resolvers only resolve NP mentions and leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs. min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans. Minimum spans are determined using the MINA algorithm. Returns: \'mentions\': mentions \'muc\': MUC metric [Vilain et al, 1995] \'bcub\': B-cubed [Bagga and Baldwin, 1998] \'ceafe\': CEAFe [Luo et al., 2005] \'lea\': LEA [Moosavi and Strube, 2016] \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe) Examples: >>> coval = datasets.load_metric(\'coval\') >>> words = [\'bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -\', ... \'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)\', ... 
\'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)\', ... \'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -\', ... \'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -\', ... \'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -\'] >>> references = [words] >>> predictions = [words] >>> results = coval.compute(predictions=predictions, references=references) >>> print(results) # doctest:+ELLIPSIS {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0} ''' def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=True , UpperCAmelCase=False , UpperCAmelCase="dummy_doc" ) -> Any: snake_case_ = {doc: key_lines} snake_case_ = {doc: sys_lines} snake_case_ = {} snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ , snake_case_ = reader.get_doc_mentions(UpperCAmelCase , key_doc_lines[doc] , UpperCAmelCase ) key_singletons_num += singletons_num if NP_only or min_span: snake_case_ = reader.set_annotated_parse_trees(UpperCAmelCase , key_doc_lines[doc] , UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ = reader.get_doc_mentions(UpperCAmelCase , sys_doc_lines[doc] , UpperCAmelCase ) sys_singletons_num += singletons_num if NP_only or min_span: snake_case_ = reader.set_annotated_parse_trees(UpperCAmelCase , key_doc_lines[doc] , UpperCAmelCase , UpperCAmelCase ) if remove_nested: snake_case_ , snake_case_ = reader.remove_nested_coref_mentions(UpperCAmelCase , UpperCAmelCase ) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters snake_case_ , snake_case_ = reader.remove_nested_coref_mentions(UpperCAmelCase , UpperCAmelCase ) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters snake_case_ = reader.get_mention_assignments(UpperCAmelCase , UpperCAmelCase ) snake_case_ = reader.get_mention_assignments(UpperCAmelCase , UpperCAmelCase ) snake_case_ = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( 'Number of removed nested coreferring mentions in the key ' f'annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}' ) logger.info( 'Number of resulting singleton clusters in the key ' f'annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}' ) if not keep_singletons: logger.info( f'{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system ' 'files, respectively' ) return doc_coref_infos def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> Tuple: snake_case_ = get_coref_infos(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) snake_case_ = {} snake_case_ = 0 snake_case_ = 0 for name, metric in metrics: snake_case_ , snake_case_ , snake_case_ = evaluator.evaluate_documents(UpperCAmelCase , UpperCAmelCase , beta=1 ) if name in ["muc", "bcub", "ceafe"]: conll += fa conll_subparts_num += 1 output_scores.update({f'{name}/recall': recall, f'{name}/precision': precision, f'{name}/f1': fa} ) logger.info( name.ljust(10 ) , f'Recall: {recall * 100:.2f}' , f' Precision: {precision * 100:.2f}' , f' F1: {fa * 100:.2f}' , ) if conll_subparts_num == 3: snake_case_ = (conll / 3) * 100 logger.info(f'CoNLL score: {conll:.2f}' ) output_scores.update({'conll_score': conll} ) 
return output_scores def UpperCAmelCase ( UpperCAmelCase ) -> Any: snake_case_ = False for line in key_lines: if not line.startswith('#' ): if len(line.split() ) > 6: snake_case_ = line.split()[5] if not parse_col == "-": snake_case_ = True break else: break return has_gold_parse @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): def a_ ( self) -> int: return datasets.MetricInfo( description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string')), 'references': datasets.Sequence(datasets.Value('string')), }), codebase_urls=['https://github.com/ns-moosavi/coval'], reference_urls=[ 'https://github.com/ns-moosavi/coval', 'https://www.aclweb.org/anthology/P16-1060', 'http://www.conll.cemantix.org/2012/data.html', ], ) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__=True, lowerCAmelCase__=False, lowerCAmelCase__=False, lowerCAmelCase__=False) -> Dict: snake_case_ = [ ('mentions', evaluator.mentions), ('muc', evaluator.muc), ('bcub', evaluator.b_cubed), ('ceafe', evaluator.ceafe), ('lea', evaluator.lea), ] if min_span: snake_case_ = util.check_gold_parse_annotation(lowerCAmelCase__) if not has_gold_parse: raise NotImplementedError('References should have gold parse annotation to use \'min_span\'.') # util.parse_key_file(key_file) # key_file = key_file + ".parsed" snake_case_ = evaluate( key_lines=lowerCAmelCase__, sys_lines=lowerCAmelCase__, metrics=lowerCAmelCase__, NP_only=lowerCAmelCase__, remove_nested=lowerCAmelCase__, keep_singletons=lowerCAmelCase__, min_span=lowerCAmelCase__, ) return score
312
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Optional[Any]: if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Dict: snake_case_ = [] for old_item in old_list: snake_case_ = old_item.replace('in_layers.0' , 'norm1' ) snake_case_ = new_item.replace('in_layers.2' , 'conv1' ) snake_case_ = new_item.replace('out_layers.0' , 'norm2' ) snake_case_ = new_item.replace('out_layers.3' , 'conv2' ) snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' ) snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Union[str, Any]: snake_case_ = [] for old_item in old_list: snake_case_ = old_item snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' ) snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' ) snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Optional[Any]: assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): snake_case_ = old_checkpoint[path] snake_case_ = old_tensor.shape[0] // 3 snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3 snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 ) snake_case_ = query.reshape(UpperCAmelCase ) snake_case_ = key.reshape(UpperCAmelCase ) snake_case_ = value.reshape(UpperCAmelCase ) for path in paths: snake_case_ = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: snake_case_ = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: snake_case_ = old_checkpoint[path['old']][:, :, 0] else: snake_case_ = old_checkpoint[path['old']] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> List[Any]: snake_case_ = {} snake_case_ = checkpoint['time_embed.0.weight'] snake_case_ = checkpoint['time_embed.0.bias'] snake_case_ = checkpoint['time_embed.2.weight'] snake_case_ = checkpoint['time_embed.2.bias'] snake_case_ = checkpoint['input_blocks.0.0.weight'] snake_case_ = checkpoint['input_blocks.0.0.bias'] snake_case_ = checkpoint['out.0.weight'] snake_case_ = checkpoint['out.0.bias'] snake_case_ = checkpoint['out.2.weight'] snake_case_ = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the middle blocks only snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } # Retrieves the keys for the output blocks only snake_case_ = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) snake_case_ = { layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key] for layer_id in range(UpperCAmelCase ) } for i in range(1 , UpperCAmelCase ): snake_case_ = (i - 1) // (config['num_res_blocks'] + 1) snake_case_ = (i - 1) % (config['num_res_blocks'] + 1) snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key] snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key] if f'input_blocks.{i}.0.op.weight' in checkpoint: snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.weight' ] snake_case_ = checkpoint[ f'input_blocks.{i}.0.op.bias' ] continue snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'} snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase ) if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'input_blocks.{i}.1', 'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'input_blocks.{i}.1.qkv.bias': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'input_blocks.{i}.1.qkv.weight': { 'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , ) snake_case_ = middle_blocks[0] snake_case_ = middle_blocks[1] snake_case_ = middle_blocks[2] snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase ) snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase ) for i in range(UpperCAmelCase ): snake_case_ = i // (config['num_res_blocks'] + 1) snake_case_ = i % (config['num_res_blocks'] + 1) snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]] snake_case_ = {} for layer in output_block_layers: snake_case_ , snake_case_ = layer.split('.' 
)[0], shave_segments(UpperCAmelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCAmelCase ) else: snake_case_ = [layer_name] if len(UpperCAmelCase ) > 1: snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key] snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key] snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = renew_resnet_paths(UpperCAmelCase ) snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.weight' ] snake_case_ = checkpoint[ f'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(UpperCAmelCase ) == 2: snake_case_ = [] if len(UpperCAmelCase ): snake_case_ = renew_attention_paths(UpperCAmelCase ) snake_case_ = { 'old': f'output_blocks.{i}.1', 'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}', } snake_case_ = { f'output_blocks.{i}.1.qkv.bias': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, f'output_blocks.{i}.1.qkv.weight': { 'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', 'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', 'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , ) else: snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] ) snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] ) snake_case_ = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.''' ) parser.add_argument( '''--config_file''', default=None, type=str, required=True, help='''The config json file corresponding to the architecture.''', ) parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''') __UpperCamelCase = parser.parse_args() __UpperCamelCase = torch.load(args.checkpoint_path) with open(args.config_file) as f: __UpperCamelCase = json.loads(f.read()) __UpperCamelCase = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] __UpperCamelCase = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: __UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1])) __UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: 
E722 model.save_pretrained(args.dump_path)
312
1
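Purely illustrative: how one CoNLL line from the CoVal metric description above decomposes into the columns the scorer actually reads (columns 4, 5, 6 and the last one, in the description's 1-based numbering).

line = "bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)"
cols = line.split()
word, pos, parse_bit, coref = cols[3], cols[4], cols[5], cols[-1]
print(word, pos, parse_bit, coref)  # you PRP (NP*) (116)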
"""simple docstring""" import mpmath # for roots of unity import numpy as np class UpperCamelCase : def __init__( self, lowerCAmelCase__=None, lowerCAmelCase__=None) -> List[str]: # Input as list snake_case_ = list(poly_a or [0])[:] snake_case_ = list(poly_b or [0])[:] # Remove leading zero coefficients while self.polyA[-1] == 0: self.polyA.pop() snake_case_ = len(self.polyA) while self.polyB[-1] == 0: self.polyB.pop() snake_case_ = len(self.polyB) # Add 0 to make lengths equal a power of 2 snake_case_ = int( 2 ** np.ceil(np.loga(len(self.polyA) + len(self.polyB) - 1))) while len(self.polyA) < self.c_max_length: self.polyA.append(0) while len(self.polyB) < self.c_max_length: self.polyB.append(0) # A complex root used for the fourier transform snake_case_ = complex(mpmath.root(x=1, n=self.c_max_length, k=1)) # The product snake_case_ = self.__multiply() def a_ ( self, lowerCAmelCase__) -> Union[str, Any]: snake_case_ = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB] # Corner case if len(lowerCAmelCase__) <= 1: return dft[0] # snake_case_ = self.c_max_length // 2 while next_ncol > 0: snake_case_ = [[] for i in range(lowerCAmelCase__)] snake_case_ = self.root**next_ncol # First half of next step snake_case_ = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(lowerCAmelCase__): new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j]) current_root *= root # Second half of next step snake_case_ = 1 for j in range(self.c_max_length // (next_ncol * 2)): for i in range(lowerCAmelCase__): new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j]) current_root *= root # Update snake_case_ = new_dft snake_case_ = next_ncol // 2 return dft[0] def a_ ( self) -> Union[str, Any]: snake_case_ = self.__dft('A') snake_case_ = self.__dft('B') snake_case_ = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]] del dft_a del dft_b # Corner Case if len(inverce_c[0]) <= 1: return inverce_c[0] # Inverse DFT snake_case_ = 2 while next_ncol <= self.c_max_length: snake_case_ = [[] for i in range(lowerCAmelCase__)] snake_case_ = self.root ** (next_ncol // 2) snake_case_ = 1 # First half of next step for j in range(self.c_max_length // next_ncol): for i in range(next_ncol // 2): # Even positions new_inverse_c[i].append( ( inverce_c[i][j] + inverce_c[i][j + self.c_max_length // next_ncol] ) / 2) # Odd positions new_inverse_c[i + next_ncol // 2].append( ( inverce_c[i][j] - inverce_c[i][j + self.c_max_length // next_ncol] ) / (2 * current_root)) current_root *= root # Update snake_case_ = new_inverse_c next_ncol *= 2 # Unpack snake_case_ = [round(x[0].real, 8) + round(x[0].imag, 8) * 1J for x in inverce_c] # Remove leading 0's while inverce_c[-1] == 0: inverce_c.pop() return inverce_c def __str__( self) -> Tuple: snake_case_ = 'A = ' + ' + '.join( f'{coef}*x^{i}' for coef, i in enumerate(self.polyA[: self.len_A])) snake_case_ = 'B = ' + ' + '.join( f'{coef}*x^{i}' for coef, i in enumerate(self.polyB[: self.len_B])) snake_case_ = 'A*B = ' + ' + '.join( f'{coef}*x^{i}' for coef, i in enumerate(self.product)) return f'{a}\n{b}\n{c}' # Unit tests if __name__ == "__main__": import doctest doctest.testmod()
312
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel def UpperCAmelCase ( UpperCAmelCase ) -> Dict: # vision encoder if "img_encoder.pos_embed" in name: snake_case_ = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' ) if "img_encoder.patch_embed.proj" in name: snake_case_ = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' ) if "img_encoder.patch_embed.norm" in name: snake_case_ = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' ) if "img_encoder.layers" in name: snake_case_ = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' ) if "blocks" in name and "res" not in name: snake_case_ = name.replace('blocks' , 'layers' ) if "attn" in name and "pre_assign" not in name: snake_case_ = name.replace('attn' , 'self_attn' ) if "proj" in name and "self_attn" in name and "text" not in name: snake_case_ = name.replace('proj' , 'out_proj' ) if "pre_assign_attn.attn.proj" in name: snake_case_ = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' ) if "norm1" in name: snake_case_ = name.replace('norm1' , 'layer_norm1' ) if "norm2" in name and "pre_assign" not in name: snake_case_ = name.replace('norm2' , 'layer_norm2' ) if "img_encoder.norm" in name: snake_case_ = name.replace('img_encoder.norm' , 'vision_model.layernorm' ) # text encoder if "text_encoder.token_embedding" in name: snake_case_ = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' ) if "text_encoder.positional_embedding" in name: snake_case_ = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' ) if "text_encoder.transformer.resblocks." in name: snake_case_ = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' ) if "ln_1" in name: snake_case_ = name.replace('ln_1' , 'layer_norm1' ) if "ln_2" in name: snake_case_ = name.replace('ln_2' , 'layer_norm2' ) if "c_fc" in name: snake_case_ = name.replace('c_fc' , 'fc1' ) if "c_proj" in name: snake_case_ = name.replace('c_proj' , 'fc2' ) if "text_encoder" in name: snake_case_ = name.replace('text_encoder' , 'text_model' ) if "ln_final" in name: snake_case_ = name.replace('ln_final' , 'final_layer_norm' ) # projection layers if "img_projector.linear_hidden." in name: snake_case_ = name.replace('img_projector.linear_hidden.' , 'visual_projection.' ) if "img_projector.linear_out." in name: snake_case_ = name.replace('img_projector.linear_out.' , 'visual_projection.3.' ) if "text_projector.linear_hidden" in name: snake_case_ = name.replace('text_projector.linear_hidden' , 'text_projection' ) if "text_projector.linear_out" in name: snake_case_ = name.replace('text_projector.linear_out' , 'text_projection.3' ) return name def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(UpperCAmelCase ) if "qkv" in key: # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' 
) snake_case_ , snake_case_ = int(key_split[2] ), int(key_split[4] ) snake_case_ = config.vision_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[dim : dim * 2, :] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] elif "in_proj" in key: # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment: # we need to split them up into separate matrices/vectors snake_case_ = key.split('.' ) snake_case_ = int(key_split[3] ) snake_case_ = config.text_config.hidden_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = rename_key(UpperCAmelCase ) # squeeze if necessary if ( "text_projection.0" in new_name or "text_projection.3" in new_name or "visual_projection.0" in new_name or "visual_projection.3" in new_name ): snake_case_ = val.squeeze_() else: snake_case_ = val return orig_state_dict def UpperCAmelCase ( ) -> Any: snake_case_ = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case_ = Image.open(requests.get(UpperCAmelCase , stream=UpperCAmelCase ).raw ) return im @torch.no_grad() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase="groupvit-gcc-yfcc" , UpperCAmelCase=False ) -> int: snake_case_ = GroupViTConfig() snake_case_ = GroupViTModel(UpperCAmelCase ).eval() snake_case_ = torch.load(UpperCAmelCase , map_location='cpu' )['model'] snake_case_ = convert_state_dict(UpperCAmelCase , UpperCAmelCase ) snake_case_ , snake_case_ = model.load_state_dict(UpperCAmelCase , strict=UpperCAmelCase ) assert missing_keys == ["text_model.embeddings.position_ids"] assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(UpperCAmelCase ) == 0) # verify result snake_case_ = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' ) snake_case_ = prepare_img() snake_case_ = processor(text=['a photo of a cat', 'a photo of a dog'] , images=UpperCAmelCase , padding=UpperCAmelCase , return_tensors='pt' ) with torch.no_grad(): snake_case_ = model(**UpperCAmelCase ) if model_name == "groupvit-gcc-yfcc": snake_case_ = torch.tensor([[13.3_523, 6.3_629]] ) elif model_name == "groupvit-gcc-redcaps": snake_case_ = torch.tensor([[16.1_873, 8.6_230]] ) else: raise ValueError(f'Model name {model_name} not supported.' ) assert torch.allclose(outputs.logits_per_image , UpperCAmelCase , atol=1e-3 ) processor.save_pretrained(UpperCAmelCase ) model.save_pretrained(UpperCAmelCase ) print('Successfully saved processor and model to' , UpperCAmelCase ) if push_to_hub: print('Pushing to the hub...' ) processor.push_to_hub(UpperCAmelCase , organization='nielsr' ) model.push_to_hub(UpperCAmelCase , organization='nielsr' ) if __name__ == "__main__": __UpperCamelCase = argparse.ArgumentParser() parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to dump the processor and PyTorch model.''' ) parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to GroupViT checkpoint''') parser.add_argument( '''--model_name''', default='''groupvit-gcc-yfcc''', type=str, help='''Name of the model. 
Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.''', ) __UpperCamelCase = parser.parse_args() convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
312
1
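A hand-checkable reference for what the DFT-based polynomial multiplier above should produce, assuming poly_a and poly_b hold coefficients in ascending order of degree: (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2.

poly_a = [1, 2]
poly_b = [3, 4]
product = [0] * (len(poly_a) + len(poly_b) - 1)
for i, a in enumerate(poly_a):  # naive O(n^2) convolution as a cross-check
    for j, b in enumerate(poly_b):
        product[i + j] += a * b
print(product)  # [3, 10, 8]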
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): @register_to_config def __init__( self, lowerCAmelCase__ = 768, ) -> Optional[Any]: super().__init__() snake_case_ = nn.Parameter(torch.zeros(1, lowerCAmelCase__)) snake_case_ = nn.Parameter(torch.ones(1, lowerCAmelCase__)) def a_ ( self, lowerCAmelCase__ = None, lowerCAmelCase__ = None, ) -> Optional[int]: snake_case_ = nn.Parameter(self.mean.to(lowerCAmelCase__).to(lowerCAmelCase__)) snake_case_ = nn.Parameter(self.std.to(lowerCAmelCase__).to(lowerCAmelCase__)) return self def a_ ( self, lowerCAmelCase__) -> List[str]: snake_case_ = (embeds - self.mean) * 1.0 / self.std return embeds def a_ ( self, lowerCAmelCase__) -> Tuple: snake_case_ = (embeds * self.std) + self.mean return embeds
312
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __UpperCamelCase = {'''configuration_mmbt''': ['''MMBTConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase = ['''MMBTForClassification''', '''MMBTModel''', '''ModalEmbeddings'''] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys __UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
312
1
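A sketch of the scale/unscale round trip that the small mean/std module above implements, assuming its two registered parameters act as a per-dimension mean and standard deviation (the tensor names here are illustrative only).

import torch

mean = torch.tensor([[1.0, 2.0, 3.0, 4.0]])
std = torch.tensor([[0.5, 1.0, 2.0, 4.0]])
embeds = torch.randn(2, 4)
scaled = (embeds - mean) * 1.0 / std  # mirrors the scaling transform
restored = (scaled * std) + mean      # mirrors the inverse transform
assert torch.allclose(embeds, restored, atol=1e-5)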
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "glpn" def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=4, lowerCAmelCase__=[2, 2, 2, 2], lowerCAmelCase__=[8, 4, 2, 1], lowerCAmelCase__=[32, 64, 160, 256], lowerCAmelCase__=[7, 3, 3, 3], lowerCAmelCase__=[4, 2, 2, 2], lowerCAmelCase__=[1, 2, 5, 8], lowerCAmelCase__=[4, 4, 4, 4], lowerCAmelCase__="gelu", lowerCAmelCase__=0.0, lowerCAmelCase__=0.0, lowerCAmelCase__=0.02, lowerCAmelCase__=0.1, lowerCAmelCase__=1e-6, lowerCAmelCase__=64, lowerCAmelCase__=10, lowerCAmelCase__=-1, **lowerCAmelCase__, ) -> str: super().__init__(**lowerCAmelCase__) snake_case_ = num_channels snake_case_ = num_encoder_blocks snake_case_ = depths snake_case_ = sr_ratios snake_case_ = hidden_sizes snake_case_ = patch_sizes snake_case_ = strides snake_case_ = mlp_ratios snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = drop_path_rate snake_case_ = layer_norm_eps snake_case_ = decoder_hidden_size snake_case_ = max_depth snake_case_ = head_in_index
312
"""simple docstring""" from __future__ import annotations def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> list[str]: if partitions <= 0: raise ValueError('partitions must be a positive number!' ) if partitions > number_of_bytes: raise ValueError('partitions can not > number_of_bytes!' ) snake_case_ = number_of_bytes // partitions snake_case_ = [] for i in range(UpperCAmelCase ): snake_case_ = i * bytes_per_partition + 1 snake_case_ = ( number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition ) allocation_list.append(f'{start_bytes}-{end_bytes}' ) return allocation_list if __name__ == "__main__": import doctest doctest.testmod()
312
1
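A worked example for the byte-partitioning helper above: 100 bytes split into 3 partitions. bytes_per_partition = 100 // 3 = 33, and the final partition absorbs the remainder.

number_of_bytes, partitions = 100, 3
bytes_per_partition = number_of_bytes // partitions
allocation = []
for i in range(partitions):
    start = i * bytes_per_partition + 1
    end = number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
    allocation.append(f"{start}-{end}")
print(allocation)  # ['1-33', '34-66', '67-100']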
"""simple docstring""" import copy import fnmatch import json import os import pickle as pkl import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from functools import partial from hashlib import shaaaa from io import BytesIO from pathlib import Path from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import cva import numpy as np import requests import wget from filelock import FileLock from PIL import Image from tqdm.auto import tqdm from yaml import Loader, dump, load try: import torch __UpperCamelCase = True except ImportError: __UpperCamelCase = False try: from torch.hub import _get_torch_home __UpperCamelCase = _get_torch_home() except ImportError: __UpperCamelCase = os.path.expanduser( os.getenv('''TORCH_HOME''', os.path.join(os.getenv('''XDG_CACHE_HOME''', '''~/.cache'''), '''torch''')) ) __UpperCamelCase = os.path.join(torch_cache_home, '''transformers''') __UpperCamelCase = '''https://cdn.huggingface.co''' __UpperCamelCase = '''https://s3.amazonaws.com/models.huggingface.co/bert''' __UpperCamelCase = '''/'''.join(str(Path(__file__).resolve()).split('''/''')[:-1]) __UpperCamelCase = os.path.join(PATH, '''config.yaml''') __UpperCamelCase = os.path.join(PATH, '''attributes.txt''') __UpperCamelCase = os.path.join(PATH, '''objects.txt''') __UpperCamelCase = os.getenv('''PYTORCH_PRETRAINED_BERT_CACHE''', default_cache_path) __UpperCamelCase = os.getenv('''PYTORCH_TRANSFORMERS_CACHE''', PYTORCH_PRETRAINED_BERT_CACHE) __UpperCamelCase = os.getenv('''TRANSFORMERS_CACHE''', PYTORCH_TRANSFORMERS_CACHE) __UpperCamelCase = '''pytorch_model.bin''' __UpperCamelCase = '''config.yaml''' def UpperCAmelCase ( UpperCAmelCase=OBJECTS , UpperCAmelCase=ATTRIBUTES ) -> int: snake_case_ = [] with open(UpperCAmelCase ) as f: for object in f.readlines(): vg_classes.append(object.split(',' )[0].lower().strip() ) snake_case_ = [] with open(UpperCAmelCase ) as f: for object in f.readlines(): vg_attrs.append(object.split(',' )[0].lower().strip() ) return vg_classes, vg_attrs def UpperCAmelCase ( UpperCAmelCase ) -> Any: snake_case_ = OrderedDict() with open(UpperCAmelCase , 'rb' ) as f: snake_case_ = pkl.load(UpperCAmelCase )['model'] for k in copy.deepcopy(list(ckp.keys() ) ): snake_case_ = ckp.pop(UpperCAmelCase ) if isinstance(UpperCAmelCase , np.ndarray ): snake_case_ = torch.tensor(UpperCAmelCase ) else: assert isinstance(UpperCAmelCase , torch.tensor ), type(UpperCAmelCase ) snake_case_ = v return r class UpperCamelCase : SCREAMING_SNAKE_CASE_ = {} def __init__( self, lowerCAmelCase__, lowerCAmelCase__ = "root", lowerCAmelCase__=0) -> str: snake_case_ = name snake_case_ = level snake_case_ = {} for k, v in dictionary.items(): if v is None: raise ValueError() snake_case_ = copy.deepcopy(lowerCAmelCase__) snake_case_ = copy.deepcopy(lowerCAmelCase__) if isinstance(lowerCAmelCase__, lowerCAmelCase__): snake_case_ = Config(lowerCAmelCase__, name=lowerCAmelCase__, level=level + 1) snake_case_ = v setattr(self, lowerCAmelCase__, lowerCAmelCase__) snake_case_ = d def __repr__( self) -> int: return str(list((self._pointer.keys()))) def __setattr__( self, lowerCAmelCase__, lowerCAmelCase__) -> Dict: snake_case_ = val snake_case_ = val snake_case_ = key.split('.') snake_case_ = len(lowerCAmelCase__) - 1 snake_case_ = self._pointer if len(lowerCAmelCase__) > 1: for i, l in enumerate(lowerCAmelCase__): if hasattr(self, lowerCAmelCase__) and isinstance(getattr(self, lowerCAmelCase__), lowerCAmelCase__): 
setattr(getattr(self, lowerCAmelCase__), '.'.join(levels[i:]), lowerCAmelCase__) if l == last_level: snake_case_ = val else: snake_case_ = pointer[l] def a_ ( self) -> List[Any]: return self._pointer def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> Optional[int]: with open(f'{file_name}', 'w') as stream: dump(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self, lowerCAmelCase__, lowerCAmelCase__) -> List[Any]: with open(f'{file_name}', 'w') as stream: json.dump(lowerCAmelCase__, lowerCAmelCase__) @staticmethod def a_ ( lowerCAmelCase__) -> Optional[int]: with open(lowerCAmelCase__) as stream: snake_case_ = load(lowerCAmelCase__, Loader=lowerCAmelCase__) return data def __str__( self) -> Dict: snake_case_ = ' ' if self._name != "root": snake_case_ = f'{t * (self._level-1)}{self._name}:\n' else: snake_case_ = '' snake_case_ = self._level for i, (k, v) in enumerate(self._pointer.items()): if isinstance(lowerCAmelCase__, lowerCAmelCase__): r += f'{t * (self._level)}{v}\n' self._level += 1 else: r += f'{t * (self._level)}{k}: {v} ({type(lowerCAmelCase__).__name__})\n' snake_case_ = level return r[:-1] @classmethod def a_ ( cls, lowerCAmelCase__, **lowerCAmelCase__) -> Optional[Any]: snake_case_ , snake_case_ = cls.get_config_dict(lowerCAmelCase__, **lowerCAmelCase__) return cls(lowerCAmelCase__) @classmethod def a_ ( cls, lowerCAmelCase__, **lowerCAmelCase__) -> int: snake_case_ = kwargs.pop('cache_dir', lowerCAmelCase__) snake_case_ = kwargs.pop('force_download', lowerCAmelCase__) snake_case_ = kwargs.pop('resume_download', lowerCAmelCase__) snake_case_ = kwargs.pop('proxies', lowerCAmelCase__) snake_case_ = kwargs.pop('local_files_only', lowerCAmelCase__) if os.path.isdir(lowerCAmelCase__): snake_case_ = os.path.join(lowerCAmelCase__, lowerCAmelCase__) elif os.path.isfile(lowerCAmelCase__) or is_remote_url(lowerCAmelCase__): snake_case_ = pretrained_model_name_or_path else: snake_case_ = hf_bucket_url(lowerCAmelCase__, filename=lowerCAmelCase__, use_cdn=lowerCAmelCase__) try: # Load from URL or cache if already cached snake_case_ = cached_path( lowerCAmelCase__, cache_dir=lowerCAmelCase__, force_download=lowerCAmelCase__, proxies=lowerCAmelCase__, resume_download=lowerCAmelCase__, local_files_only=lowerCAmelCase__, ) # Load config dict if resolved_config_file is None: raise EnvironmentError snake_case_ = Config.load_yaml(lowerCAmelCase__) except EnvironmentError: snake_case_ = 'Can\'t load config for' raise EnvironmentError(lowerCAmelCase__) if resolved_config_file == config_file: print('loading configuration file from path') else: print('loading configuration file cache') return Config.load_yaml(lowerCAmelCase__), kwargs def UpperCAmelCase ( UpperCAmelCase ) -> Optional[int]: snake_case_ = torch.load('dump.pt' , map_location=in_tensor.device ) snake_case_ = in_tensor.numpy() snake_case_ = out_tensor.numpy()[0] print(na.shape , na[0, 0, :5] ) print(na.shape , na[0, 0, :5] ) assert np.allclose(UpperCAmelCase , UpperCAmelCase , rtol=0.01 , atol=0.1 ), ( f'{sum([1 for x in np.isclose(UpperCAmelCase , UpperCAmelCase , rtol=0.01 , atol=0.1 ).flatten() if x is False] )/len(na.flatten() )*100:.4f} %' " element-wise mismatch" ) raise Exception('tensors are all good' ) # Hugging face functions below def UpperCAmelCase ( UpperCAmelCase ) -> str: snake_case_ = urlparse(UpperCAmelCase ) return parsed.scheme in ("http", "https") def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=True ) -> str: snake_case_ = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX snake_case_ = '/' 
not in model_id if legacy_format: return f'{endpoint}/{model_id}-{filename}' else: return f'{endpoint}/{model_id}/{filename}' def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=0 , UpperCAmelCase=None , ) -> List[Any]: snake_case_ = 'python/{}'.format(sys.version.split()[0] ) if _torch_available: ua += "; torch/{}".format(torch.__version__ ) if isinstance(UpperCAmelCase , UpperCAmelCase ): ua += "; " + "; ".join('{}/{}'.format(UpperCAmelCase , UpperCAmelCase ) for k, v in user_agent.items() ) elif isinstance(UpperCAmelCase , UpperCAmelCase ): ua += "; " + user_agent snake_case_ = {'user-agent': ua} if resume_size > 0: snake_case_ = 'bytes=%d-' % (resume_size,) snake_case_ = requests.get(UpperCAmelCase , stream=UpperCAmelCase , proxies=UpperCAmelCase , headers=UpperCAmelCase ) if response.status_code == 416: # Range not satisfiable return snake_case_ = response.headers.get('Content-Length' ) snake_case_ = resume_size + int(UpperCAmelCase ) if content_length is not None else None snake_case_ = tqdm( unit='B' , unit_scale=UpperCAmelCase , total=UpperCAmelCase , initial=UpperCAmelCase , desc='Downloading' , ) for chunk in response.iter_content(chunk_size=1024 ): if chunk: # filter out keep-alive new chunks progress.update(len(UpperCAmelCase ) ) temp_file.write(UpperCAmelCase ) progress.close() def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=10 , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=False , ) -> List[Any]: if cache_dir is None: snake_case_ = TRANSFORMERS_CACHE if isinstance(UpperCAmelCase , UpperCAmelCase ): snake_case_ = str(UpperCAmelCase ) os.makedirs(UpperCAmelCase , exist_ok=UpperCAmelCase ) snake_case_ = None if not local_files_only: try: snake_case_ = requests.head(UpperCAmelCase , allow_redirects=UpperCAmelCase , proxies=UpperCAmelCase , timeout=UpperCAmelCase ) if response.status_code == 200: snake_case_ = response.headers.get('ETag' ) except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass snake_case_ = url_to_filename(UpperCAmelCase , UpperCAmelCase ) # get cache path to put the file snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(UpperCAmelCase ): return cache_path else: snake_case_ = [ file for file in fnmatch.filter(os.listdir(UpperCAmelCase ) , filename + '.*' ) if not file.endswith('.json' ) and not file.endswith('.lock' ) ] if len(UpperCAmelCase ) > 0: return os.path.join(UpperCAmelCase , matching_files[-1] ) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( 'Cannot find the requested files in the cached path and outgoing traffic has been' ' disabled. To enable model look-ups and downloads online, set \'local_files_only\'' ' to False.' ) return None # From now on, etag is not None. if os.path.exists(UpperCAmelCase ) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. snake_case_ = cache_path + '.lock' with FileLock(UpperCAmelCase ): # If the download just completed while the lock was activated. if os.path.exists(UpperCAmelCase ) and not force_download: # Even if returning early like here, the lock will be released. 
return cache_path if resume_download: snake_case_ = cache_path + '.incomplete' @contextmanager def _resumable_file_manager(): with open(UpperCAmelCase , 'a+b' ) as f: yield f snake_case_ = _resumable_file_manager if os.path.exists(UpperCAmelCase ): snake_case_ = os.stat(UpperCAmelCase ).st_size else: snake_case_ = 0 else: snake_case_ = partial(tempfile.NamedTemporaryFile , dir=UpperCAmelCase , delete=UpperCAmelCase ) snake_case_ = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with temp_file_manager() as temp_file: print( '%s not found in cache or force_download set to True, downloading to %s' , UpperCAmelCase , temp_file.name , ) http_get( UpperCAmelCase , UpperCAmelCase , proxies=UpperCAmelCase , resume_size=UpperCAmelCase , user_agent=UpperCAmelCase , ) os.replace(temp_file.name , UpperCAmelCase ) snake_case_ = {'url': url, 'etag': etag} snake_case_ = cache_path + '.json' with open(UpperCAmelCase , 'w' ) as meta_file: json.dump(UpperCAmelCase , UpperCAmelCase ) return cache_path def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None ) -> str: snake_case_ = url.encode('utf-8' ) snake_case_ = shaaaa(UpperCAmelCase ) snake_case_ = url_hash.hexdigest() if etag: snake_case_ = etag.encode('utf-8' ) snake_case_ = shaaaa(UpperCAmelCase ) filename += "." + etag_hash.hexdigest() if url.endswith('.h5' ): filename += ".h5" return filename def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=None , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=False , ) -> Tuple: if cache_dir is None: snake_case_ = TRANSFORMERS_CACHE if isinstance(UpperCAmelCase , UpperCAmelCase ): snake_case_ = str(UpperCAmelCase ) if isinstance(UpperCAmelCase , UpperCAmelCase ): snake_case_ = str(UpperCAmelCase ) if is_remote_url(UpperCAmelCase ): # URL, so get it from the cache (downloading if necessary) snake_case_ = get_from_cache( UpperCAmelCase , cache_dir=UpperCAmelCase , force_download=UpperCAmelCase , proxies=UpperCAmelCase , resume_download=UpperCAmelCase , user_agent=UpperCAmelCase , local_files_only=UpperCAmelCase , ) elif os.path.exists(UpperCAmelCase ): # File, and it exists. snake_case_ = url_or_filename elif urlparse(UpperCAmelCase ).scheme == "": # File, but it doesn't exist. raise EnvironmentError('file {} not found'.format(UpperCAmelCase ) ) else: # Something unknown raise ValueError('unable to parse {} as a URL or as a local path'.format(UpperCAmelCase ) ) if extract_compressed_file: if not is_zipfile(UpperCAmelCase ) and not tarfile.is_tarfile(UpperCAmelCase ): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" snake_case_ , snake_case_ = os.path.split(UpperCAmelCase ) snake_case_ = output_file.replace('.' 
, '-' ) + '-extracted' snake_case_ = os.path.join(UpperCAmelCase , UpperCAmelCase ) if os.path.isdir(UpperCAmelCase ) and os.listdir(UpperCAmelCase ) and not force_extract: return output_path_extracted # Prevent parallel extractions snake_case_ = output_path + '.lock' with FileLock(UpperCAmelCase ): shutil.rmtree(UpperCAmelCase , ignore_errors=UpperCAmelCase ) os.makedirs(UpperCAmelCase ) if is_zipfile(UpperCAmelCase ): with ZipFile(UpperCAmelCase , 'r' ) as zip_file: zip_file.extractall(UpperCAmelCase ) zip_file.close() elif tarfile.is_tarfile(UpperCAmelCase ): snake_case_ = tarfile.open(UpperCAmelCase ) tar_file.extractall(UpperCAmelCase ) tar_file.close() else: raise EnvironmentError('Archive format of {} could not be identified'.format(UpperCAmelCase ) ) return output_path_extracted return output_path def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase="," ) -> Dict: assert isinstance(UpperCAmelCase , UpperCAmelCase ) if os.path.isfile(UpperCAmelCase ): with open(UpperCAmelCase ) as f: snake_case_ = eval(f.read() ) else: snake_case_ = requests.get(UpperCAmelCase ) try: snake_case_ = requests.json() except Exception: snake_case_ = req.content.decode() assert data is not None, "could not connect" try: snake_case_ = eval(UpperCAmelCase ) except Exception: snake_case_ = data.split('\n' ) req.close() return data def UpperCAmelCase ( UpperCAmelCase ) -> str: snake_case_ = requests.get(UpperCAmelCase ) snake_case_ = np.array(Image.open(BytesIO(response.content ) ) ) return img def UpperCAmelCase ( UpperCAmelCase ) -> Optional[Any]: snake_case_ = url.split('/' )[-1] if fn not in os.listdir(os.getcwd() ): wget.download(UpperCAmelCase ) with open(UpperCAmelCase , 'rb' ) as stream: snake_case_ = pkl.load(UpperCAmelCase ) snake_case_ = weights.pop('model' ) snake_case_ = {} for k, v in model.items(): snake_case_ = torch.from_numpy(UpperCAmelCase ) if "running_var" in k: snake_case_ = torch.tensor([0] ) snake_case_ = k.replace('running_var' , 'num_batches_tracked' ) snake_case_ = zero return new def UpperCAmelCase ( ) -> List[str]: print(f'{os.path.abspath(os.path.join(UpperCAmelCase , os.pardir ) )}/demo.ipynb' ) def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase="RGB" ) -> List[Any]: assert isinstance(UpperCAmelCase , UpperCAmelCase ) if os.path.isfile(UpperCAmelCase ): snake_case_ = cva.imread(UpperCAmelCase ) else: snake_case_ = get_image_from_url(UpperCAmelCase ) assert img is not None, f'could not connect to: {im}' snake_case_ = cva.cvtColor(UpperCAmelCase , cva.COLOR_BGR2RGB ) if input_format == "RGB": snake_case_ = img[:, :, ::-1] return img def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> List[Any]: return (images[i : i + batch] for i in range(0 , len(UpperCAmelCase ) , UpperCAmelCase ))
312
"""simple docstring""" __UpperCamelCase = 256 # Modulus to hash a string __UpperCamelCase = 100_0003 def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> bool: snake_case_ = len(UpperCAmelCase ) snake_case_ = len(UpperCAmelCase ) if p_len > t_len: return False snake_case_ = 0 snake_case_ = 0 snake_case_ = 1 # Calculating the hash of pattern and substring of text for i in range(UpperCAmelCase ): snake_case_ = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus snake_case_ = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue snake_case_ = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash snake_case_ = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def UpperCAmelCase ( ) -> None: snake_case_ = 'abc1abc12' snake_case_ = 'alskfjaldsabc1abc1abc12k23adsfabcabc' snake_case_ = 'alskfjaldsk23adsfabcabc' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) and not rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 2) snake_case_ = 'ABABX' snake_case_ = 'ABABZABABYABABX' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 3) snake_case_ = 'AAAB' snake_case_ = 'ABAAAAAB' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 4) snake_case_ = 'abcdabcy' snake_case_ = 'abcxabcdabxabcdabcdabcy' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) # Test 5) snake_case_ = 'Lü' snake_case_ = 'Lüsai' assert rabin_karp(UpperCAmelCase , UpperCAmelCase ) snake_case_ = 'Lue' assert not rabin_karp(UpperCAmelCase , UpperCAmelCase ) print('Success.' ) if __name__ == "__main__": test_rabin_karp()
312
1
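A small numeric check of the rolling-hash update used by the Rabin-Karp search above, with alphabet_size = 256 and modulus = 1000003 as defined there. Note that pow(alphabet_size, p_len - 1, modulus) equals the modulus_power that code builds up iteratively.

alphabet_size, modulus = 256, 1000003

def window_hash(s: str) -> int:
    h = 0
    for ch in s:  # earlier characters become higher-order digits in base 256
        h = (ord(ch) + h * alphabet_size) % modulus
    return h

p_len = 3
modulus_power = pow(alphabet_size, p_len - 1, modulus)
h = window_hash("abc")
rolled = ((h - ord("a") * modulus_power) * alphabet_size + ord("d")) % modulus
assert rolled == window_hash("bcd")  # sliding the window by one keeps the hash consistent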
"""simple docstring""" from __future__ import annotations __UpperCamelCase = list[list[int]] # assigning initial values to the grid __UpperCamelCase = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution __UpperCamelCase = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def UpperCAmelCase ( UpperCAmelCase ) -> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def UpperCAmelCase ( UpperCAmelCase ) -> Matrix | None: if location := find_empty_location(UpperCAmelCase ): snake_case_ , snake_case_ = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ): snake_case_ = digit if sudoku(UpperCAmelCase ) is not None: return grid snake_case_ = 0 return None def UpperCAmelCase ( UpperCAmelCase ) -> None: for row in grid: for cell in row: print(UpperCAmelCase , end=' ' ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 20) print_solution(example_grid) print('''\nExample grid solution:''') __UpperCamelCase = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
312
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __UpperCamelCase = logging.get_logger(__name__) __UpperCamelCase = { '''microsoft/resnet-50''': '''https://huggingface.co/microsoft/resnet-50/blob/main/config.json''', } class UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = "resnet" SCREAMING_SNAKE_CASE_ = ["basic", "bottleneck"] def __init__( self, lowerCAmelCase__=3, lowerCAmelCase__=64, lowerCAmelCase__=[256, 512, 1024, 2048], lowerCAmelCase__=[3, 4, 6, 3], lowerCAmelCase__="bottleneck", lowerCAmelCase__="relu", lowerCAmelCase__=False, lowerCAmelCase__=None, lowerCAmelCase__=None, **lowerCAmelCase__, ) -> Dict: super().__init__(**lowerCAmelCase__) if layer_type not in self.layer_types: raise ValueError(f'layer_type={layer_type} is not one of {",".join(self.layer_types)}') snake_case_ = num_channels snake_case_ = embedding_size snake_case_ = hidden_sizes snake_case_ = depths snake_case_ = layer_type snake_case_ = hidden_act snake_case_ = downsample_in_first_stage snake_case_ = ['stem'] + [f'stage{idx}' for idx in range(1, len(lowerCAmelCase__) + 1)] snake_case_ , snake_case_ = get_aligned_output_features_output_indices( out_features=lowerCAmelCase__, out_indices=lowerCAmelCase__, stage_names=self.stage_names) class UpperCamelCase ( lowerCAmelCase__ ): SCREAMING_SNAKE_CASE_ = version.parse("1.11" ) @property def a_ ( self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ]) @property def a_ ( self) -> float: return 1e-3
312
1
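A quick check of the box-lookup arithmetic the sudoku solver's safety test above relies on: (row - row % 3, column - column % 3) maps any cell to the top-left corner of its 3x3 box.

for row, column in [(0, 0), (4, 7), (8, 2)]:
    print((row - row % 3, column - column % 3))
# (0, 0) -> (0, 0); (4, 7) -> (3, 6); (8, 2) -> (6, 0)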
"""simple docstring""" __UpperCamelCase = { "km/h": 1.0, "m/s": 3.6, "mph": 1.609344, "knot": 1.852, } __UpperCamelCase = { "km/h": 1.0, "m/s": 0.277777778, "mph": 0.621371192, "knot": 0.539956803, } def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) -> float: if unit_to not in speed_chart or unit_from not in speed_chart_inverse: snake_case_ = ( f'Incorrect \'from_type\' or \'to_type\' value: {unit_from!r}, {unit_to!r}\n' f'Valid values are: {", ".join(UpperCAmelCase )}' ) raise ValueError(UpperCAmelCase ) return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 ) if __name__ == "__main__": import doctest doctest.testmod()
312
"""simple docstring""" import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCamelCase = get_tests_dir('''fixtures/spiece.model''') @require_sentencepiece @require_tokenizers class UpperCamelCase ( lowerCAmelCase__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ = DebertaVaTokenizer SCREAMING_SNAKE_CASE_ = DebertaVaTokenizerFast SCREAMING_SNAKE_CASE_ = True SCREAMING_SNAKE_CASE_ = True def a_ ( self) -> int: super().setUp() # We have a SentencePiece fixture for testing snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, unk_token='<unk>') tokenizer.save_pretrained(self.tmpdirname) def a_ ( self, lowerCAmelCase__) -> Any: snake_case_ = 'this is a test' snake_case_ = 'this is a test' return input_text, output_text def a_ ( self) -> Optional[int]: snake_case_ = '<pad>' snake_case_ = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase__), lowerCAmelCase__) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase__), lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], '<pad>') self.assertEqual(vocab_keys[1], '<unk>') self.assertEqual(vocab_keys[-1], '[PAD]') self.assertEqual(len(lowerCAmelCase__), 3_0001) def a_ ( self) -> Dict: self.assertEqual(self.get_tokenizer().vocab_size, 3_0000) def a_ ( self) -> List[str]: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁hello', '!', 'how', '▁are', '▁you', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> str: pass @unittest.skip('There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.') def a_ ( self) -> List[Any]: pass def a_ ( self) -> str: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> List[Any]: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁i', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', '▁', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '▁', '.', ] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Any: # fmt: off snake_case_ = ' \tHeLLo!how \n Are yoU? ' snake_case_ = ['▁', '<unk>', 'e', '<unk>', 'o', '!', 'how', '▁', '<unk>', 're', '▁yo', '<unk>', '?'] # fmt: on snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, do_lower_case=lowerCAmelCase__, split_by_punct=lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Dict: snake_case_ = self.get_tokenizer() snake_case_ = self.get_rust_tokenizer() snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) snake_case_ = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__)) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = self.get_rust_tokenizer() snake_case_ = tokenizer.encode(lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> int: snake_case_ = 'This is a test' snake_case_ = [13, 1, 4398, 25, 21, 1289] snake_case_ = ['▁', 'T', 'his', '▁is', '▁a', '▁test'] snake_case_ = ['▁', '<unk>', 'his', '▁is', '▁a', '▁test'] snake_case_ = DebertaVaTokenizer(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = DebertaVaTokenizerFast(lowerCAmelCase__, keep_accents=lowerCAmelCase__) snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) # fmt: off snake_case_ = 'I was born in 92000, and this is falsé.' 
snake_case_ = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] snake_case_ = ['▁', 'I', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', 'é', '.', ] snake_case_ = ['▁', '<unk>', '▁was', '▁born', '▁in', '▁9', '2000', ',', '▁and', '▁this', '▁is', '▁fal', 's', '<unk>', '.', ] # fmt: on snake_case_ = tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.encode(lowerCAmelCase__, add_special_tokens=lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.tokenize(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) snake_case_ = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase__) self.assertListEqual(lowerCAmelCase__, lowerCAmelCase__) def a_ ( self) -> Tuple: snake_case_ = DebertaVaTokenizer(lowerCAmelCase__) snake_case_ = tokenizer.encode('sequence builders') snake_case_ = tokenizer.encode('multi-sequence build') snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__) snake_case_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase__, lowerCAmelCase__) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], lowerCAmelCase__) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id], lowerCAmelCase__, ) @slow def a_ ( self) -> Union[str, Any]: # fmt: off snake_case_ = {'input_ids': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'token_type_ids': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase__, model_name='microsoft/deberta-v2-xlarge', revision='ad6e42c1532ddf3a15c39246b63f5559d558b670', )
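The slow/fast parity these tests assert can also be checked outside the harness. A hedged sketch: the real transformers class names are DebertaV2Tokenizer / DebertaV2TokenizerFast (this dump renders them as DebertaVaTokenizer and DebertaVaTokenizerFast), the checkpoint is the one named in the integration test above, and the first run needs network access to download the tokenizer files: from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast slow = DebertaV2Tokenizer.from_pretrained("microsoft/deberta-v2-xlarge") fast = DebertaV2TokenizerFast.from_pretrained("microsoft/deberta-v2-xlarge") text = "I was born in 92000, and this is falsé." # mirrors the full-tokenizer parity test above assert slow.encode(text) == fast.encode(text)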
312
1