Dataset preview. Each row has five fields, shown below in this order: code,
code_codestyle, style_context, style_context_codestyle, label.

  column                    type     values
  ------------------------  -------  -----------------------
  code                      string   lengths 87 to 55.2k
  code_codestyle            int64    0 to 349
  style_context             string   lengths 135 to 49.1k
  style_context_codestyle   int64    0 to 349
  label                     int64    0 to 1
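Assuming this preview comes from a Hugging Face datasets repository with the
schema above, a minimal sketch of loading it and inspecting one row; the
repository id and split name are placeholders, not taken from this dump:

    # Minimal sketch; "org/code-style-pairs" is a hypothetical repository id.
    from datasets import load_dataset

    ds = load_dataset("org/code-style-pairs", split="train")  # placeholder path

    row = ds[0]
    print(row["code"][:120])               # flattened Python source sample
    print(row["code_codestyle"])           # integer style id in [0, 349]
    print(row["style_context"][:120])      # second source file used as style reference
    print(row["style_context_codestyle"])  # its style id
    print(row["label"])                    # 0 or 1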
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys _lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
code_codestyle: 263
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCAmelCase :str = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :str = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys _lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 263
label: 1
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / 'utils')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 _lowerCAmelCase :Dict = get_tests_dir('fixtures') class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> int: # A mock response for an HTTP head request to emulate server down _UpperCAmelCase : List[str] = mock.Mock() _UpperCAmelCase : str = 5_0_0 _UpperCAmelCase : Tuple = {} _UpperCAmelCase : List[Any] = HTTPError _UpperCAmelCase : Tuple = {} # Download this model to make sure it's in the cache. _UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('''requests.Session.request''' , return_value=A ) as mock_head: _UpperCAmelCase : Any = WavaVecaFeatureExtractor.from_pretrained('''hf-internal-testing/tiny-random-wav2vec2''' ) # This check we did call the fake head request mock_head.assert_called() def __lowerCAmelCase ( self ) -> List[Any]: # This test is for deprecated behavior and can be removed in v5 _UpperCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained( '''https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json''' ) @is_staging_test class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __lowerCAmelCase ( cls ) -> Optional[Any]: _UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def __lowerCAmelCase ( cls ) -> Any: try: delete_repo(token=cls._token , repo_id='''test-feature-extractor''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''valid_org/test-feature-extractor-org''' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='''test-dynamic-feature-extractor''' ) except HTTPError: pass def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(A ) feature_extractor.push_to_hub('''test-feature-extractor''' , use_auth_token=self._token ) _UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id='''test-feature-extractor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( A , repo_id='''test-feature-extractor''' , push_to_hub=A , use_auth_token=self._token ) _UpperCAmelCase : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(f'{USER}/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(A ) feature_extractor.push_to_hub('''valid_org/test-feature-extractor''' , use_auth_token=self._token ) _UpperCAmelCase : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): 
self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id='''valid_org/test-feature-extractor''' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( A , repo_id='''valid_org/test-feature-extractor-org''' , push_to_hub=A , use_auth_token=self._token ) _UpperCAmelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained('''valid_org/test-feature-extractor-org''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def __lowerCAmelCase ( self ) -> List[Any]: CustomFeatureExtractor.register_for_auto_class() _UpperCAmelCase : List[Any] = CustomFeatureExtractor.from_pretrained(A ) feature_extractor.push_to_hub('''test-dynamic-feature-extractor''' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'''AutoFeatureExtractor''': '''custom_feature_extraction.CustomFeatureExtractor'''} , ) _UpperCAmelCase : Any = AutoFeatureExtractor.from_pretrained( f'{USER}/test-dynamic-feature-extractor' , trust_remote_code=A ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , '''CustomFeatureExtractor''' )
code_codestyle: 263
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys _lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
style_context_codestyle: 263
label: 1
"""simple docstring""" from __future__ import annotations from typing import Any class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A , A = 0 ) -> None: _UpperCAmelCase , _UpperCAmelCase : Tuple = row, column _UpperCAmelCase : Tuple = [[default_value for c in range(A )] for r in range(A )] def __str__( self ) -> str: _UpperCAmelCase : Optional[int] = f'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier _UpperCAmelCase : List[str] = 0 for row_vector in self.array: for obj in row_vector: _UpperCAmelCase : int = max(A , len(str(A ) ) ) _UpperCAmelCase : int = f'%{max_element_length}s' # Make string and return def single_line(A ) -> str: nonlocal string_format_identifier _UpperCAmelCase : Optional[int] = '''[''' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(A ) for row_vector in self.array ) return s def __repr__( self ) -> str: return str(self ) def __lowerCAmelCase ( self , A ) -> bool: if not (isinstance(A , (list, tuple) ) and len(A ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self , A ) -> Any: assert self.validate_indicies(A ) return self.array[loc[0]][loc[1]] def __setitem__( self , A , A ) -> None: assert self.validate_indicies(A ) _UpperCAmelCase : List[str] = value def __add__( self , A ) -> Matrix: assert isinstance(A , A ) assert self.row == another.row and self.column == another.column # Add _UpperCAmelCase : Any = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _UpperCAmelCase : List[Any] = self[r, c] + another[r, c] return result def __neg__( self ) -> Matrix: _UpperCAmelCase : str = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _UpperCAmelCase : Any = -self[r, c] return result def __sub__( self , A ) -> Matrix: return self + (-another) def __mul__( self , A ) -> Matrix: if isinstance(A , (int, float) ): # Scalar multiplication _UpperCAmelCase : str = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): _UpperCAmelCase : Optional[Any] = self[r, c] * another return result elif isinstance(A , A ): # Matrix multiplication assert self.column == another.row _UpperCAmelCase : Dict = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: _UpperCAmelCase : List[str] = f'Unsupported type given for another ({type(A )})' raise TypeError(A ) def __lowerCAmelCase ( self ) -> Matrix: _UpperCAmelCase : str = Matrix(self.column , self.row ) for r in range(self.row ): for c in range(self.column ): _UpperCAmelCase : Tuple = self[r, c] return result def __lowerCAmelCase ( self , A , A ) -> Any: assert isinstance(A , A ) and isinstance(A , A ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate _UpperCAmelCase : Any = v.transpose() _UpperCAmelCase : Optional[Any] = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def lowerCamelCase_ (): # a^(-1) _UpperCAmelCase : List[Any] = Matrix(3 , 3 , 0 ) for i in range(3 ): _UpperCAmelCase : List[str] = 1 print(F'a^(-1) is {ainv}' ) # u, v 
_UpperCAmelCase : int = Matrix(3 , 1 , 0 ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = 1, 2, -3 _UpperCAmelCase : str = Matrix(3 , 1 , 0 ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = 4, -2, 5 print(F'u is {u}' ) print(F'v is {v}' ) print(F'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(F'(a + uv^T)^(-1) is {ainv.sherman_morrison(UpperCamelCase__ , UpperCamelCase__ )}' ) def lowerCamelCase_ (): import doctest doctest.testmod() testa()
code_codestyle: 263
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =IFImgaImgSuperResolutionPipeline a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) a__ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ) -> List[str]: return self._get_superresolution_dummy_components() def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Any = torch.manual_seed(A ) else: _UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : List[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
style_context_codestyle: 263
label: 1
"""simple docstring""" import math def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ): if ( not isinstance(UpperCamelCase__ , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * power_factor def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ): if ( not isinstance(UpperCamelCase__ , (int, float) ) or power_factor < -1 or power_factor > 1 ): raise ValueError('''power_factor must be a valid float value between -1 and 1.''' ) return apparent_power * math.sqrt(1 - power_factor**2 ) if __name__ == "__main__": import doctest doctest.testmod()
code_codestyle: 263
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int ): if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) _UpperCAmelCase : str = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 263
label: 1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _lowerCAmelCase :Dict = logging.get_logger(__name__) _lowerCAmelCase :Tuple = { 'microsoft/swin-tiny-patch4-window7-224': ( 'https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json' ), # See all Swin models at https://huggingface.co/models?filter=swin } class _UpperCAmelCase ( a ,a ): '''simple docstring''' a__ ='''swin''' a__ ={ '''num_attention_heads''': '''num_heads''', '''num_hidden_layers''': '''num_layers''', } def __init__( self , A=2_2_4 , A=4 , A=3 , A=9_6 , A=[2, 2, 6, 2] , A=[3, 6, 1_2, 2_4] , A=7 , A=4.0 , A=True , A=0.0 , A=0.0 , A=0.1 , A="gelu" , A=False , A=0.02 , A=1E-5 , A=3_2 , A=None , A=None , **A , ) -> List[Any]: super().__init__(**A ) _UpperCAmelCase : List[Any] = image_size _UpperCAmelCase : Tuple = patch_size _UpperCAmelCase : str = num_channels _UpperCAmelCase : Optional[Any] = embed_dim _UpperCAmelCase : str = depths _UpperCAmelCase : Dict = len(A ) _UpperCAmelCase : Dict = num_heads _UpperCAmelCase : Tuple = window_size _UpperCAmelCase : Optional[int] = mlp_ratio _UpperCAmelCase : Any = qkv_bias _UpperCAmelCase : Optional[int] = hidden_dropout_prob _UpperCAmelCase : Optional[int] = attention_probs_dropout_prob _UpperCAmelCase : Dict = drop_path_rate _UpperCAmelCase : int = hidden_act _UpperCAmelCase : Union[str, Any] = use_absolute_embeddings _UpperCAmelCase : Optional[int] = layer_norm_eps _UpperCAmelCase : Optional[int] = initializer_range _UpperCAmelCase : List[str] = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model _UpperCAmelCase : Union[str, Any] = int(embed_dim * 2 ** (len(A ) - 1) ) _UpperCAmelCase : Dict = ['''stem'''] + [f'stage{idx}' for idx in range(1 , len(A ) + 1 )] _UpperCAmelCase , _UpperCAmelCase : Any = get_aligned_output_features_output_indices( out_features=A , out_indices=A , stage_names=self.stage_names ) class _UpperCAmelCase ( a ): '''simple docstring''' a__ =version.parse('''1.11''' ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
code_codestyle: 263
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ): _UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ ) _UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model'''] _UpperCAmelCase : Optional[Any] = list(state_dict.keys() ) # extract state_dict for VQVAE _UpperCAmelCase : Any = {} _UpperCAmelCase : Any = '''first_stage_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] # extract state_dict for UNetLDM _UpperCAmelCase : Tuple = {} _UpperCAmelCase : int = '''model.diffusion_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] _UpperCAmelCase : List[str] = config.model.params.first_stage_config.params _UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params _UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval() vqvae.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval() unet.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : int = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , ) _UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipeline.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) _lowerCAmelCase :List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
style_context_codestyle: 263
label: 1
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : str = pipeline( task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' ) _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @slow @require_torch def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Union[str, Any] = pipeline( task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , ) # This is an audio of a dog _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ] , ) _UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) _UpperCAmelCase : Tuple = audio_classifier( [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> int: pass
code_codestyle: 263
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :List[str] = logging.get_logger(__name__) _lowerCAmelCase :Any = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''falcon''' a__ =['''past_key_values'''] def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any: _UpperCAmelCase : int = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A ) _UpperCAmelCase : int = hidden_size if n_embed is None else n_embed _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Optional[int] = layer_norm_epsilon _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : Optional[int] = use_cache _UpperCAmelCase : Any = hidden_dropout _UpperCAmelCase : Dict = attention_dropout _UpperCAmelCase : Any = bos_token_id _UpperCAmelCase : List[Any] = eos_token_id _UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase : Dict = alibi _UpperCAmelCase : Optional[int] = new_decoder_architecture _UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase : Optional[int] = parallel_attn _UpperCAmelCase : Optional[int] = bias super().__init__(bos_token_id=A , eos_token_id=A , **A ) @property def __lowerCAmelCase ( self ) -> List[str]: return self.hidden_size // self.num_attention_heads @property def __lowerCAmelCase ( self ) -> List[Any]: return not self.alibi
style_context_codestyle: 263
label: 1
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : '''simple docstring''' @staticmethod def __lowerCAmelCase ( *A , **A ) -> Union[str, Any]: pass @is_pipeline_test @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' a__ =MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def __lowerCAmelCase ( self , A , A , A ) -> Optional[Any]: _UpperCAmelCase : int = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) _UpperCAmelCase : Optional[Any] = [ { '''image''': Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''question''': '''How many cats are there?''', }, { '''image''': '''./tests/fixtures/tests_samples/COCO/000000039769.png''', '''question''': '''How many cats are there?''', }, ] return vqa_pipeline, examples def __lowerCAmelCase ( self , A , A ) -> Any: _UpperCAmelCase : str = vqa_pipeline(A , top_k=1 ) self.assertEqual( A , [ [{'''score''': ANY(A ), '''answer''': ANY(A )}], [{'''score''': ANY(A ), '''answer''': ANY(A )}], ] , ) @require_torch def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Dict = pipeline('''visual-question-answering''' , model='''hf-internal-testing/tiny-vilt-random-vqa''' ) _UpperCAmelCase : List[Any] = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' _UpperCAmelCase : Tuple = '''How many cats are there?''' _UpperCAmelCase : List[str] = vqa_pipeline(image=A , question='''How many cats are there?''' , top_k=2 ) self.assertEqual( A , [{'''score''': ANY(A ), '''answer''': ANY(A )}, {'''score''': ANY(A ), '''answer''': ANY(A )}] ) _UpperCAmelCase : Any = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( A , [{'''score''': ANY(A ), '''answer''': ANY(A )}, {'''score''': ANY(A ), '''answer''': ANY(A )}] ) @slow @require_torch def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Optional[int] = pipeline('''visual-question-answering''' , model='''dandelin/vilt-b32-finetuned-vqa''' ) _UpperCAmelCase : List[str] = '''./tests/fixtures/tests_samples/COCO/000000039769.png''' _UpperCAmelCase : Optional[int] = '''How many cats are there?''' _UpperCAmelCase : int = vqa_pipeline(image=A , question=A , top_k=2 ) self.assertEqual( nested_simplify(A , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] ) _UpperCAmelCase : int = vqa_pipeline({'''image''': image, '''question''': question} , top_k=2 ) self.assertEqual( nested_simplify(A , decimals=4 ) , [{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}] ) _UpperCAmelCase : Union[str, Any] = vqa_pipeline( [{'''image''': image, '''question''': question}, {'''image''': image, '''question''': question}] , top_k=2 ) self.assertEqual( nested_simplify(A , decimals=4 ) , [[{'''score''': 0.8_799, '''answer''': '''2'''}, {'''score''': 0.296, '''answer''': '''1'''}]] * 2 , ) @require_tf @unittest.skip('''Visual question answering not implemented in TF''' ) def __lowerCAmelCase ( self ) -> Dict: pass
code_codestyle: 263
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowerCAmelCase :int = ['small', 'medium', 'large'] _lowerCAmelCase :int = 'lm_head.decoder.weight' _lowerCAmelCase :Dict = 'lm_head.weight' def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ): _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ ) _UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": _lowerCAmelCase :Dict = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) _lowerCAmelCase :str = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") _lowerCAmelCase :int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
style_context_codestyle: 263
label: 1
"""simple docstring""" from math import isqrt def lowerCamelCase_ (UpperCamelCase__ : int ): _UpperCAmelCase : Dict = [True] * max_number for i in range(2 , isqrt(max_number - 1 ) + 1 ): if is_prime[i]: for j in range(i**2 , UpperCamelCase__ , UpperCamelCase__ ): _UpperCAmelCase : Dict = False return [i for i in range(2 , UpperCamelCase__ ) if is_prime[i]] def lowerCamelCase_ (UpperCamelCase__ : int = 10**8 ): _UpperCAmelCase : Dict = calculate_prime_numbers(max_number // 2 ) _UpperCAmelCase : List[Any] = 0 _UpperCAmelCase : Optional[Any] = 0 _UpperCAmelCase : Union[str, Any] = len(UpperCamelCase__ ) - 1 while left <= right: while prime_numbers[left] * prime_numbers[right] >= max_number: right -= 1 semiprimes_count += right - left + 1 left += 1 return semiprimes_count if __name__ == "__main__": print(f"{solution() = }")
code_codestyle: 263
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _lowerCAmelCase :Tuple = tuple[int, int] class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A ) -> None: _UpperCAmelCase : set[int] = vertices _UpperCAmelCase : dict[EdgeT, int] = { (min(A ), max(A )): weight for edge, weight in edges.items() } def __lowerCAmelCase ( self , A , A ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _UpperCAmelCase : List[Any] = weight def __lowerCAmelCase ( self ) -> Graph: _UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} ) _UpperCAmelCase : EdgeT _UpperCAmelCase : int _UpperCAmelCase : EdgeT _UpperCAmelCase : int while len(subgraph.vertices ) < len(self.vertices ): _UpperCAmelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _UpperCAmelCase : Tuple = edge _UpperCAmelCase : Optional[int] = weight subgraph.add_edge(A , A ) return subgraph def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ): _UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : dict[EdgeT, int] = {} _UpperCAmelCase : list[str] _UpperCAmelCase : int _UpperCAmelCase : int with open(UpperCamelCase__ ) as f: _UpperCAmelCase : str = f.read().strip().split('''\n''' ) _UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data] for edgea in range(1 , len(UpperCamelCase__ ) ): for edgea in range(UpperCamelCase__ ): if adjaceny_matrix[edgea][edgea] != "-": _UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] ) _UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ ) _UpperCAmelCase : Graph = graph.prims_algorithm() _UpperCAmelCase : int = sum(graph.edges.values() ) _UpperCAmelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"{solution() = }")
style_context_codestyle: 263
label: 1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import rescale, resize, to_channel_dimension_format from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL _lowerCAmelCase :str = logging.get_logger(__name__) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ): _UpperCAmelCase : int = b.T _UpperCAmelCase : Optional[int] = np.sum(np.square(UpperCamelCase__ ) , axis=1 ) _UpperCAmelCase : Any = np.sum(np.square(UpperCamelCase__ ) , axis=0 ) _UpperCAmelCase : Any = np.matmul(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : List[str] = aa[:, None] - 2 * ab + ba[None, :] return d def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ): _UpperCAmelCase : Union[str, Any] = x.reshape(-1 , 3 ) _UpperCAmelCase : Optional[Any] = squared_euclidean_distance(UpperCamelCase__ , UpperCamelCase__ ) return np.argmin(UpperCamelCase__ , axis=1 ) class _UpperCAmelCase ( a ): '''simple docstring''' a__ =['''pixel_values'''] def __init__( self , A = None , A = True , A = None , A = PILImageResampling.BILINEAR , A = True , A = True , **A , ) -> None: super().__init__(**A ) _UpperCAmelCase : List[Any] = size if size is not None else {'''height''': 2_5_6, '''width''': 2_5_6} _UpperCAmelCase : Any = get_size_dict(A ) _UpperCAmelCase : Union[str, Any] = np.array(A ) if clusters is not None else None _UpperCAmelCase : str = do_resize _UpperCAmelCase : Tuple = size _UpperCAmelCase : Any = resample _UpperCAmelCase : Dict = do_normalize _UpperCAmelCase : Optional[int] = do_color_quantize def __lowerCAmelCase ( self , A , A , A = PILImageResampling.BILINEAR , A = None , **A , ) -> np.ndarray: _UpperCAmelCase : int = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(f'Size dictionary must contain both height and width keys. Got {size.keys()}' ) return resize( A , size=(size['''height'''], size['''width''']) , resample=A , data_format=A , **A ) def __lowerCAmelCase ( self , A , A = None , ) -> np.ndarray: _UpperCAmelCase : Optional[Any] = rescale(image=A , scale=1 / 127.5 , data_format=A ) _UpperCAmelCase : List[str] = image - 1 return image def __lowerCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image: _UpperCAmelCase : Tuple = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase : List[Any] = size if size is not None else self.size _UpperCAmelCase : Union[str, Any] = get_size_dict(A ) _UpperCAmelCase : List[Any] = resample if resample is not None else self.resample _UpperCAmelCase : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase : Optional[int] = do_color_quantize if do_color_quantize is not None else self.do_color_quantize _UpperCAmelCase : Dict = clusters if clusters is not None else self.clusters _UpperCAmelCase : Union[str, Any] = np.array(A ) _UpperCAmelCase : str = make_list_of_images(A ) if not valid_images(A ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_color_quantize and clusters is None: raise ValueError('''Clusters must be specified if do_color_quantize is True.''' ) # All transformations expect numpy arrays. _UpperCAmelCase : Tuple = [to_numpy_array(A ) for image in images] if do_resize: _UpperCAmelCase : Dict = [self.resize(image=A , size=A , resample=A ) for image in images] if do_normalize: _UpperCAmelCase : str = [self.normalize(image=A ) for image in images] if do_color_quantize: _UpperCAmelCase : Optional[int] = [to_channel_dimension_format(A , ChannelDimension.LAST ) for image in images] # color quantize from (batch_size, height, width, 3) to (batch_size, height, width) _UpperCAmelCase : int = np.array(A ) _UpperCAmelCase : Tuple = color_quantize(A , A ).reshape(images.shape[:-1] ) # flatten to (batch_size, height*width) _UpperCAmelCase : List[Any] = images.shape[0] _UpperCAmelCase : int = images.reshape(A , -1 ) # We need to convert back to a list of images to keep consistent behaviour across processors. _UpperCAmelCase : int = list(A ) else: _UpperCAmelCase : List[str] = [to_channel_dimension_format(A , A ) for image in images] _UpperCAmelCase : Optional[Any] = {'''input_ids''': images} return BatchFeature(data=A , tensor_type=A )
code_codestyle: 263
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :int = logging.get_logger(__name__) _lowerCAmelCase :Union[str, Any] = { 'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''mgp-str''' def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]: super().__init__(**A ) _UpperCAmelCase : Any = image_size _UpperCAmelCase : str = patch_size _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Dict = max_token_length _UpperCAmelCase : Optional[Any] = num_character_labels _UpperCAmelCase : int = num_bpe_labels _UpperCAmelCase : List[str] = num_wordpiece_labels _UpperCAmelCase : Optional[int] = hidden_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : List[Any] = mlp_ratio _UpperCAmelCase : List[str] = distilled _UpperCAmelCase : Optional[int] = layer_norm_eps _UpperCAmelCase : str = drop_rate _UpperCAmelCase : List[Any] = qkv_bias _UpperCAmelCase : List[str] = attn_drop_rate _UpperCAmelCase : Dict = drop_path_rate _UpperCAmelCase : Union[str, Any] = output_aa_attentions _UpperCAmelCase : List[str] = initializer_range
style_context_codestyle: 263
label: 1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase :Tuple = logging.get_logger(__name__) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple=False ): _UpperCAmelCase : Optional[int] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), ('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" _UpperCAmelCase : Union[str, Any] = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : int=False ): for i in range(config.num_hidden_layers ): if base_model: _UpperCAmelCase : Union[str, Any] = '''''' else: _UpperCAmelCase : List[Any] = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase : List[Any] = state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) _UpperCAmelCase : int = state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase : Any = in_proj_weight[ : config.hidden_size, : ] _UpperCAmelCase : Tuple = in_proj_bias[: config.hidden_size] _UpperCAmelCase : Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase : Any = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase : int = in_proj_weight[ -config.hidden_size 
:, : ] _UpperCAmelCase : Optional[Any] = in_proj_bias[-config.hidden_size :] def lowerCamelCase_ (UpperCamelCase__ : Any ): _UpperCAmelCase : Dict = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] ): _UpperCAmelCase : List[Any] = dct.pop(UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = val def lowerCamelCase_ (): _UpperCAmelCase : Tuple = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _UpperCAmelCase : int = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int=True ): _UpperCAmelCase : List[Any] = ViTConfig() # patch_size if model_name[-1] == "8": _UpperCAmelCase : int = 8 # set labels if required if not base_model: _UpperCAmelCase : Optional[Any] = 1000 _UpperCAmelCase : str = '''huggingface/label-files''' _UpperCAmelCase : str = '''imagenet-1k-id2label.json''' _UpperCAmelCase : Tuple = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) ) _UpperCAmelCase : Union[str, Any] = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _UpperCAmelCase : Tuple = idalabel _UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} # size of the architecture if model_name in ["dino_vits8", "dino_vits16"]: _UpperCAmelCase : Tuple = 384 _UpperCAmelCase : Optional[Any] = 1536 _UpperCAmelCase : str = 12 _UpperCAmelCase : Union[str, Any] = 6 # load original model from torch hub _UpperCAmelCase : Optional[Any] = torch.hub.load('''facebookresearch/dino:main''' , UpperCamelCase__ ) original_model.eval() # load state_dict of original model, remove and rename some keys _UpperCAmelCase : List[Any] = original_model.state_dict() if base_model: remove_classification_head_(UpperCamelCase__ ) _UpperCAmelCase : str = create_rename_keys(UpperCamelCase__ , base_model=UpperCamelCase__ ) for src, dest in rename_keys: rename_key(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) read_in_q_k_v(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # load HuggingFace model if base_model: _UpperCAmelCase : int = ViTModel(UpperCamelCase__ , add_pooling_layer=UpperCamelCase__ ).eval() else: _UpperCAmelCase : List[Any] = ViTForImageClassification(UpperCamelCase__ ).eval() model.load_state_dict(UpperCamelCase__ ) # Check outputs on an image, prepared by ViTImageProcessor _UpperCAmelCase : Tuple = ViTImageProcessor() _UpperCAmelCase : Dict = image_processor(images=prepare_img() , return_tensors='''pt''' ) _UpperCAmelCase : int = encoding['''pixel_values'''] _UpperCAmelCase : Tuple = model(UpperCamelCase__ ) if base_model: _UpperCAmelCase : Optional[int] = original_model(UpperCamelCase__ ) assert torch.allclose(UpperCamelCase__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 ) else: _UpperCAmelCase : Dict = original_model(UpperCamelCase__ ) assert logits.shape == outputs.logits.shape assert torch.allclose(UpperCamelCase__ , outputs.logits , atol=1E-3 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F'Saving model {model_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCamelCase__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _lowerCAmelCase :Tuple = 
argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='dino_vitb16', type=str, help='Name of the model trained with DINO you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--base_model', action='store_true', help='Whether to only convert the base model (no projection head weights).', ) parser.set_defaults(base_model=True) _lowerCAmelCase :Union[str, Any] = parser.parse_args() convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
code_codestyle: 263
"""simple docstring""" from __future__ import annotations import math def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ): if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(UpperCamelCase__ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) def lowerCamelCase_ (): _UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423] _UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
style_context_codestyle: 263
label: 1
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models.modeling_utils import ModelMixin class _UpperCAmelCase ( a ,a ): '''simple docstring''' @register_to_config def __init__( self , A = 7_6_8 , ) -> Optional[Any]: super().__init__() _UpperCAmelCase : List[Any] = nn.Parameter(torch.zeros(1 , A ) ) _UpperCAmelCase : Dict = nn.Parameter(torch.ones(1 , A ) ) def __lowerCAmelCase ( self , A = None , A = None , ) -> Dict: _UpperCAmelCase : int = nn.Parameter(self.mean.to(A ).to(A ) ) _UpperCAmelCase : int = nn.Parameter(self.std.to(A ).to(A ) ) return self def __lowerCAmelCase ( self , A ) -> str: _UpperCAmelCase : Union[str, Any] = (embeds - self.mean) * 1.0 / self.std return embeds def __lowerCAmelCase ( self , A ) -> Optional[int]: _UpperCAmelCase : Tuple = (embeds * self.std) + self.mean return embeds
code_codestyle: 263
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _lowerCAmelCase :Optional[Any] = False class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A ) _UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = generator.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = '''cyberpunk 2077''' _UpperCAmelCase : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.dual_guided( prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images _UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger ''' _UpperCAmelCase : Tuple = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.text_to_image( prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images _UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images _UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 
2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
style_context_codestyle: 263
label: 1
"""simple docstring""" import argparse import logging import os import sys import numpy as np import onnxruntime import torch from bart_onnx.generation_onnx import BARTBeamSearchGenerator from bart_onnx.reduce_onnx_size import remove_dup_initializers import transformers from transformers import BartForConditionalGeneration, BartTokenizer logging.basicConfig( format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s', datefmt='%Y-%m-%d %H:%M:%S', level=os.environ.get('LOGLEVEL', 'INFO').upper(), stream=sys.stdout, ) _lowerCAmelCase :Dict = logging.getLogger(__name__) _lowerCAmelCase :Union[str, Any] = {'facebook/bart-base': BartForConditionalGeneration} _lowerCAmelCase :Dict = {'facebook/bart-base': BartTokenizer} def lowerCamelCase_ (): _UpperCAmelCase : Any = argparse.ArgumentParser(description='''Export Bart model + Beam Search to ONNX graph.''' ) parser.add_argument( '''--validation_file''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''A csv or a json file containing the validation data.''' ) parser.add_argument( '''--max_length''' , type=UpperCamelCase__ , default=5 , help='''The maximum total input sequence length after tokenization.''' , ) parser.add_argument( '''--num_beams''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help=( '''Number of beams to use for evaluation. This argument will be ''' '''passed to ``model.generate``, which is used during ``evaluate`` and ``predict``.''' ) , ) parser.add_argument( '''--model_name_or_path''' , type=UpperCamelCase__ , help='''Path to pretrained model or model identifier from huggingface.co/models.''' , required=UpperCamelCase__ , ) parser.add_argument( '''--config_name''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Pretrained config name or path if not the same as model_name''' , ) parser.add_argument( '''--device''' , type=UpperCamelCase__ , default='''cpu''' , help='''Device where the model will be run''' , ) parser.add_argument('''--output_file_path''' , type=UpperCamelCase__ , default=UpperCamelCase__ , help='''Where to store the final ONNX file.''' ) _UpperCAmelCase : Optional[int] = parser.parse_args() return args def lowerCamelCase_ (UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int]="cpu" ): _UpperCAmelCase : Tuple = model_dict[model_name].from_pretrained(UpperCamelCase__ ).to(UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = tokenizer_dict[model_name].from_pretrained(UpperCamelCase__ ) if model_name in ["facebook/bart-base"]: _UpperCAmelCase : Union[str, Any] = 0 _UpperCAmelCase : List[Any] = None _UpperCAmelCase : Optional[Any] = 0 return huggingface_model, tokenizer def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] ): model.eval() _UpperCAmelCase : str = None _UpperCAmelCase : List[str] = torch.jit.script(BARTBeamSearchGenerator(UpperCamelCase__ ) ) with torch.no_grad(): _UpperCAmelCase : Optional[Any] = '''My friends are cool but they eat too many carbs.''' _UpperCAmelCase : Tuple = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors='''pt''' ).to(model.device ) _UpperCAmelCase : Optional[int] = model.generate( inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , num_beams=UpperCamelCase__ , max_length=UpperCamelCase__ , early_stopping=UpperCamelCase__ , decoder_start_token_id=model.config.decoder_start_token_id , ) torch.onnx.export( UpperCamelCase__ , ( inputs['''input_ids'''], 
inputs['''attention_mask'''], num_beams, max_length, model.config.decoder_start_token_id, ) , UpperCamelCase__ , opset_version=14 , input_names=['''input_ids''', '''attention_mask''', '''num_beams''', '''max_length''', '''decoder_start_token_id'''] , output_names=['''output_ids'''] , dynamic_axes={ '''input_ids''': {0: '''batch''', 1: '''seq'''}, '''output_ids''': {0: '''batch''', 1: '''seq_out'''}, } , example_outputs=UpperCamelCase__ , ) logger.info('''Model exported to {}'''.format(UpperCamelCase__ ) ) _UpperCAmelCase : int = remove_dup_initializers(os.path.abspath(UpperCamelCase__ ) ) logger.info('''Deduplicated and optimized model written to {}'''.format(UpperCamelCase__ ) ) _UpperCAmelCase : Optional[Any] = onnxruntime.InferenceSession(UpperCamelCase__ ) _UpperCAmelCase : Dict = ort_sess.run( UpperCamelCase__ , { '''input_ids''': inputs['''input_ids'''].cpu().numpy(), '''attention_mask''': inputs['''attention_mask'''].cpu().numpy(), '''num_beams''': np.array(UpperCamelCase__ ), '''max_length''': np.array(UpperCamelCase__ ), '''decoder_start_token_id''': np.array(model.config.decoder_start_token_id ), } , ) np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1E-3 , atol=1E-3 ) logger.info('''Model outputs from torch and ONNX Runtime are similar.''' ) logger.info('''Success.''' ) def lowerCamelCase_ (): _UpperCAmelCase : Union[str, Any] = parse_args() _UpperCAmelCase : Optional[Any] = 5 _UpperCAmelCase : Optional[int] = 4 # Make one log on every process with the configuration for debugging. logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , ) logger.setLevel(logging.INFO ) transformers.utils.logging.set_verbosity_error() _UpperCAmelCase : Union[str, Any] = torch.device(args.device ) _UpperCAmelCase , _UpperCAmelCase : Dict = load_model_tokenizer(args.model_name_or_path , UpperCamelCase__ ) if model.config.decoder_start_token_id is None: raise ValueError('''Make sure that `config.decoder_start_token_id` is correctly defined''' ) model.to(UpperCamelCase__ ) if args.max_length: _UpperCAmelCase : Dict = args.max_length if args.num_beams: _UpperCAmelCase : Any = args.num_beams if args.output_file_path: _UpperCAmelCase : Optional[int] = args.output_file_path else: _UpperCAmelCase : Optional[Any] = '''BART.onnx''' logger.info('''Exporting model to ONNX''' ) export_and_validate_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
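# A hedged invocation sketch for the exporter above; the script filename and
# output path are placeholders, and facebook/bart-base is the only checkpoint
# registered in the script's model/tokenizer dicts:
#
#   python run_bart_onnx_export.py \
#       --model_name_or_path facebook/bart-base \
#       --max_length 5 \
#       --num_beams 4 \
#       --device cpu \
#       --output_file_path BART.onnx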
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCAmelCase :Any = False @skip_mps class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ): '''simple docstring''' a__ =StableDiffusionAttendAndExcitePipeline a__ =False a__ =TEXT_TO_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) a__ =TEXT_TO_IMAGE_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __lowerCAmelCase ( cls ) -> List[str]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , ) _UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , ) torch.manual_seed(0 ) _UpperCAmelCase : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _UpperCAmelCase : List[str] = CLIPTextModel(A ) _UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCAmelCase : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: _UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : List[str] = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = '''cpu''' _UpperCAmelCase : Tuple = self.get_dummy_components() _UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Dict = self.get_dummy_inputs(A 
) _UpperCAmelCase : Union[str, Any] = pipe(**A ).images _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) _UpperCAmelCase : int = np.array( [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) _UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1E-3 ) def __lowerCAmelCase ( self ) -> Dict: super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> str: super().test_save_load_local(expected_max_difference=5E-4 ) def __lowerCAmelCase ( self ) -> Optional[int]: super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = torch.manual_seed(5_1 ) _UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses''' _UpperCAmelCase : int = [5, 7] _UpperCAmelCase : Dict = pipe( prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] _UpperCAmelCase : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
"""simple docstring""" from unittest import TestCase from datasets import Dataset from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters def lowerCamelCase_ (): _UpperCAmelCase : Union[str, Any] = { '''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''], '''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''], '''content''': ['''a ''' * 20, '''a ''' * 30, '''b ''' * 7], } _UpperCAmelCase : Dict = Dataset.from_dict(UpperCamelCase__ ) return dataset class _UpperCAmelCase ( a ): '''simple docstring''' def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : int = get_dataset() _UpperCAmelCase : int = make_duplicate_clusters(A , 0.85 ) self.assertEqual(len(duplicate_clusters[0] ) , 2 ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : int = get_dataset() _UpperCAmelCase , _UpperCAmelCase : Tuple = deduplicate_dataset(A ) self.assertEqual(len(A ) , 2 ) print(A ) self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 ) self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , A )
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : List[str] = -1 _UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCAmelCase : str = TextStreamer(A ) model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCAmelCase : List[str] = cs.out[:-1] self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : List[Any] = -1 _UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] ) _UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A ) _UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A ) thread.start() _UpperCAmelCase : Any = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : Any = -1 _UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :] _UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A ) model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCAmelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Optional[int]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' ) _UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A ) _UpperCAmelCase : Tuple = -1 _UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A ) model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n" _UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : Dict = -1 _UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 ) _UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(A ): _UpperCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
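# A compact usage sketch for TextStreamer outside the test harness; the tiny
# test checkpoint is reused here only to keep the snippet lightweight.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

    tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    lm = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    streamer = TextStreamer(tok, skip_prompt=True)
    prompt = tok("An increasing sequence: one,", return_tensors="pt")
    # Tokens are printed to stdout as soon as they are generated.
    lm.generate(**prompt, max_new_tokens=10, do_sample=False, streamer=streamer)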
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer _lowerCAmelCase :Dict = logging.get_logger(__name__) _lowerCAmelCase :Any = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} _lowerCAmelCase :Any = { 'vocab_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json', }, 'merges_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt', }, 'tokenizer_file': { 'Salesforce/codegen-350M-mono': ( 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json' ), }, } _lowerCAmelCase :Union[str, Any] = { 'Salesforce/codegen-350M-mono': 2_048, } class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =['''input_ids''', '''attention_mask'''] a__ =CodeGenTokenizer def __init__( self , A=None , A=None , A=None , A="<|endoftext|>" , A="<|endoftext|>" , A="<|endoftext|>" , A=False , **A , ) -> Optional[int]: super().__init__( A , A , tokenizer_file=A , unk_token=A , bos_token=A , eos_token=A , add_prefix_space=A , **A , ) if kwargs.pop('''add_bos_token''' , A ): _UpperCAmelCase : str = kwargs.pop('''name_or_path''' , '''''' ) raise ValueError( '''Currenty GPT2\'s fast tokenizer does NOT support adding a BOS token.''' '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n''' f'`CodeGenTokenizer.from_pretrained(\'{model_id}\')`\nor\n' f'`AutoTokenizer.from_pretrained(\'{model_id}\', use_fast=False)`\n' '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.''' ''' so that the fast tokenizer works correctly.''' ) _UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , A ) != add_prefix_space: _UpperCAmelCase : Optional[Any] = getattr(A , pre_tok_state.pop('''type''' ) ) _UpperCAmelCase : Any = add_prefix_space _UpperCAmelCase : Any = pre_tok_class(**A ) _UpperCAmelCase : Any = add_prefix_space def __lowerCAmelCase ( self , *A , **A ) -> BatchEncoding: _UpperCAmelCase : Optional[Any] = kwargs.get('''is_split_into_words''' , A ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*A , **A ) def __lowerCAmelCase ( self , *A , **A ) -> BatchEncoding: _UpperCAmelCase : Optional[int] = kwargs.get('''is_split_into_words''' , A ) assert self.add_prefix_space or not is_split_into_words, ( f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True ' "to use it with pretokenized inputs." 
) return super()._encode_plus(*A , **A ) def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: _UpperCAmelCase : Optional[int] = self._tokenizer.model.save(A , name=A ) return tuple(A ) def __lowerCAmelCase ( self , A , A = False , A = None , A = None , **A , ) -> str: _UpperCAmelCase : Dict = super().decode( token_ids=A , skip_special_tokens=A , clean_up_tokenization_spaces=A , **A , ) if truncate_before_pattern is not None and len(A ) > 0: _UpperCAmelCase : Tuple = self.truncate(A , A ) return decoded_text def __lowerCAmelCase ( self , A , A ) -> Optional[Any]: def find_re(A , A , A ): _UpperCAmelCase : Any = pattern.search(A , A ) return m.start() if m else -1 _UpperCAmelCase : Any = [re.compile(A , re.MULTILINE ) for pattern in truncate_before_pattern] _UpperCAmelCase : str = list(re.finditer('''^print''' , A , re.MULTILINE ) ) if len(A ) > 1: _UpperCAmelCase : Tuple = completion[: prints[1].start()] _UpperCAmelCase : List[Any] = list(re.finditer('''^def''' , A , re.MULTILINE ) ) if len(A ) > 1: _UpperCAmelCase : Optional[int] = completion[: defs[1].start()] _UpperCAmelCase : Optional[Any] = 0 _UpperCAmelCase : Optional[Any] = [ pos for pos in [find_re(A , A , A ) for terminal in terminals] if pos != -1 ] if len(A ) > 0: return completion[: min(A )] else: return completion
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCamelCase_ (UpperCamelCase__ : float ): if num <= 0: raise ValueError('''math domain error''' ) return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0] def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ): return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" from math import ceil def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any ): _UpperCAmelCase : Tuple = list(range(0 , UpperCamelCase__ ) ) _UpperCAmelCase : Any = [item for sublist in list(device_map.values() ) for item in sublist] # Duplicate check _UpperCAmelCase : str = [] for i in device_map_blocks: if device_map_blocks.count(UpperCamelCase__ ) > 1 and i not in duplicate_blocks: duplicate_blocks.append(UpperCamelCase__ ) # Missing blocks _UpperCAmelCase : Union[str, Any] = [i for i in blocks if i not in device_map_blocks] _UpperCAmelCase : Optional[Any] = [i for i in device_map_blocks if i not in blocks] if len(UpperCamelCase__ ) != 0: raise ValueError( '''Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device.''' ''' These attention blocks were specified more than once: ''' + str(UpperCamelCase__ ) ) if len(UpperCamelCase__ ) != 0: raise ValueError( '''There are attention blocks for this model that are not specified in the device_map. Add these attention ''' '''blocks to a device on the device_map: ''' + str(UpperCamelCase__ ) ) if len(UpperCamelCase__ ) != 0: raise ValueError( '''The device_map contains more attention blocks than this model has. Remove these from the device_map:''' + str(UpperCamelCase__ ) ) def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ): _UpperCAmelCase : Tuple = list(range(UpperCamelCase__ ) ) _UpperCAmelCase : Optional[Any] = int(ceil(n_layers / len(UpperCamelCase__ ) ) ) _UpperCAmelCase : Optional[Any] = [layers[i : i + n_blocks] for i in range(0 , UpperCamelCase__ , UpperCamelCase__ )] return dict(zip(UpperCamelCase__ , UpperCamelCase__ ) )
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b" _UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:] _UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from math import isqrt def lowerCamelCase_ (UpperCamelCase__ : int ): return all(number % divisor != 0 for divisor in range(2 , isqrt(UpperCamelCase__ ) + 1 ) ) def lowerCamelCase_ (UpperCamelCase__ : int = 10**6 ): _UpperCAmelCase : Optional[int] = 0 _UpperCAmelCase : Tuple = 1 _UpperCAmelCase : Tuple = 7 while prime_candidate < max_prime: primes_count += is_prime(UpperCamelCase__ ) cube_index += 1 prime_candidate += 6 * cube_index return primes_count if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : int ): if exponent == 1: return base if exponent % 2 == 0: _UpperCAmelCase : List[str] = _modexpt(UpperCamelCase__ , exponent // 2 , UpperCamelCase__ ) % modulo_value return (x * x) % modulo_value else: return (base * _modexpt(UpperCamelCase__ , exponent - 1 , UpperCamelCase__ )) % modulo_value def lowerCamelCase_ (UpperCamelCase__ : int = 1777 , UpperCamelCase__ : int = 1855 , UpperCamelCase__ : int = 8 ): _UpperCAmelCase : Dict = base for _ in range(1 , UpperCamelCase__ ): _UpperCAmelCase : str = _modexpt(UpperCamelCase__ , UpperCamelCase__ , 10**digits ) return result if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) _lowerCAmelCase :List[str] = '▁' _lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'} _lowerCAmelCase :List[Any] = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } _lowerCAmelCase :Tuple = { 'xlm-roberta-base': 512, 'xlm-roberta-large': 512, 'xlm-roberta-large-finetuned-conll02-dutch': 512, 'xlm-roberta-large-finetuned-conll02-spanish': 512, 'xlm-roberta-large-finetuned-conll03-english': 512, 'xlm-roberta-large-finetuned-conll03-german': 512, } class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =['''input_ids''', '''attention_mask'''] def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token _UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , ) _UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) _UpperCAmelCase : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCAmelCase : Any = 1 _UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset _UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Union[str, Any]: _UpperCAmelCase : Tuple = self.__dict__.copy() _UpperCAmelCase : List[str] = None _UpperCAmelCase : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , A ) -> Optional[int]: _UpperCAmelCase : Optional[int] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self , A , A = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase : Any = [self.cls_token_id] _UpperCAmelCase : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : Dict = [self.sep_token_id] _UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCAmelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , A ) -> List[str]: return self.sp_model.encode(A , out_type=A ) def __lowerCAmelCase ( self , A ) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCAmelCase : Any = self.sp_model.PieceToId(A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self , A ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self , A ) -> int: _UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip() return out_string def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: if not os.path.isdir(A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase : List[Any] = os.path.join( A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A ) elif not os.path.isfile(self.vocab_file ): with open(A , '''wb''' ) as fi: _UpperCAmelCase : str = self.sp_model.serialized_model_proto() fi.write(A ) return 
(out_vocab_file,)
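# A short sketch of the fairseq/spm id alignment implemented above: the first
# four ids are pinned, every SentencePiece id is shifted by fairseq_offset (1),
# and the last id is reserved for <mask>. Illustrative, against the public
# checkpoint:
#
#   from transformers import XLMRobertaTokenizer
#
#   tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
#   tok.convert_tokens_to_ids(["<s>", "<pad>", "</s>", "<unk>"])  # [0, 1, 2, 3]
#   tok.mask_token_id == tok.vocab_size - 1                       # True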
"""simple docstring""" import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version('>=', FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _lowerCAmelCase :int = get_logger(__name__) def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any]=0 ): os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with FSDP.state_dict_type( UpperCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): _UpperCAmelCase : Optional[Any] = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: _UpperCAmelCase : Union[str, Any] = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' _UpperCAmelCase : Dict = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) if accelerator.process_index == 0: logger.info(F'Saving model to {output_model_file}' ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: _UpperCAmelCase : Dict = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) _UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) logger.info(F'Saving model to {output_model_file}' ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) logger.info(F'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , F'{MODEL_NAME}_{model_index}' ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) logger.info(F'Saving model to {ckpt_dir}' ) _UpperCAmelCase : List[str] = {'''model''': state_dict} dist_cp.save_state_dict( state_dict=UpperCamelCase__ , storage_writer=dist_cp.FileSystemWriter(UpperCamelCase__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Model saved to {ckpt_dir}' ) def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( UpperCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(UpperCamelCase__ ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( '''Set the `sync_module_states` flag to `True` so that model states are synced across processes when ''' '''initializing FSDP object''' ) return _UpperCAmelCase : Union[str, Any] = F'{MODEL_NAME}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}.bin' _UpperCAmelCase : List[str] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) logger.info(F'Loading model from {input_model_file}' ) _UpperCAmelCase : List[str] = 
torch.load(UpperCamelCase__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: _UpperCAmelCase : Optional[Any] = ( F'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else F'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) _UpperCAmelCase : Union[str, Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) logger.info(F'Loading model from {input_model_file}' ) _UpperCAmelCase : Tuple = torch.load(UpperCamelCase__ ) logger.info(F'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: _UpperCAmelCase : Union[str, Any] = ( os.path.join(UpperCamelCase__ , F'{MODEL_NAME}_{model_index}' ) if F'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(F'Loading model from {ckpt_dir}' ) _UpperCAmelCase : Dict = {'''model''': model.state_dict()} dist_cp.load_state_dict( state_dict=UpperCamelCase__ , storage_reader=dist_cp.FileSystemReader(UpperCamelCase__ ) , planner=DefaultLoadPlanner() , ) _UpperCAmelCase : Any = state_dict['''model'''] logger.info(F'Model loaded from {ckpt_dir}' ) model.load_state_dict(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any]=0 ): os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) with FSDP.state_dict_type( UpperCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): _UpperCAmelCase : Optional[int] = FSDP.optim_state_dict(UpperCamelCase__ , UpperCamelCase__ ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: _UpperCAmelCase : Tuple = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) _UpperCAmelCase : Optional[int] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) logger.info(F'Saving Optimizer state to {output_optimizer_file}' ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) logger.info(F'Optimizer state saved in {output_optimizer_file}' ) else: _UpperCAmelCase : Tuple = os.path.join(UpperCamelCase__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) logger.info(F'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={'''optimizer''': optim_state} , storage_writer=dist_cp.FileSystemWriter(UpperCamelCase__ ) , planner=DefaultSavePlanner() , ) logger.info(F'Optimizer state saved in {ckpt_dir}' ) def lowerCamelCase_ (UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any]=0 ): accelerator.wait_for_everyone() with FSDP.state_dict_type( UpperCamelCase__ , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: _UpperCAmelCase : List[str] = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: _UpperCAmelCase : Any = ( F'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else F'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) _UpperCAmelCase : Dict = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) 
logger.info(F'Loading Optimizer state from {input_optimizer_file}' ) _UpperCAmelCase : Tuple = torch.load(UpperCamelCase__ ) logger.info(F'Optimizer state loaded from {input_optimizer_file}' ) else: _UpperCAmelCase : Union[str, Any] = ( os.path.join(UpperCamelCase__ , F'{OPTIMIZER_NAME}_{optimizer_index}' ) if F'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(F'Loading Optimizer from {ckpt_dir}' ) _UpperCAmelCase : Tuple = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key='''optimizer''' , storage_reader=dist_cp.FileSystemReader(UpperCamelCase__ ) , ) _UpperCAmelCase : List[Any] = optim_state['''optimizer'''] logger.info(F'Optimizer loaded from {ckpt_dir}' ) _UpperCAmelCase : Union[str, Any] = FSDP.optim_state_dict_to_load(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) optimizer.load_state_dict(UpperCamelCase__ )
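# The branches above follow accelerate's three FSDP state-dict modes; a hedged
# configuration sketch (field names as exposed by accelerate's FSDP plugin):
#
#   from accelerate import FullyShardedDataParallelPlugin
#   from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType
#
#   fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_type=StateDictType.SHARDED_STATE_DICT)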
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , *A , **A ) -> None: warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , A , ) super().__init__(*A , **A )
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase :Dict = { 'google/mobilenet_v1_1.0_224': 'https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json', 'google/mobilenet_v1_0.75_192': 'https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json', # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''mobilenet_v1''' def __init__( self , A=3 , A=2_2_4 , A=1.0 , A=8 , A="relu6" , A=True , A=0.999 , A=0.02 , A=0.001 , **A , ) -> Optional[Any]: super().__init__(**A ) if depth_multiplier <= 0: raise ValueError('''depth_multiplier must be greater than zero.''' ) _UpperCAmelCase : Optional[int] = num_channels _UpperCAmelCase : Any = image_size _UpperCAmelCase : Dict = depth_multiplier _UpperCAmelCase : Tuple = min_depth _UpperCAmelCase : Optional[int] = hidden_act _UpperCAmelCase : Optional[Any] = tf_padding _UpperCAmelCase : int = classifier_dropout_prob _UpperCAmelCase : Union[str, Any] = initializer_range _UpperCAmelCase : Union[str, Any] = layer_norm_eps class _UpperCAmelCase ( a ): '''simple docstring''' a__ =version.parse('''1.11''' ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([('''pixel_values''', {0: '''batch'''})] ) @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([('''logits''', {0: '''batch'''})] ) else: return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})] ) @property def __lowerCAmelCase ( self ) -> float: return 1E-4
"""simple docstring""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ): # Load configuration defined in the metadata file with open(UpperCamelCase__ ) as metadata_file: _UpperCAmelCase : Dict = json.load(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' ) # Load the entity vocab file _UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens _UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.' _UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name] _UpperCAmelCase : Tuple = state_dict[prefix + matrix_name] _UpperCAmelCase : str = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']] _UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval() _UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. 
Expected only missing embeddings.position_ids' ) if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )): raise ValueError( '''Unexpected keys''' F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' ) # Check outputs _UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' ) _UpperCAmelCase : List[str] = ( '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the''' ''' new world number one avoid a humiliating second- round exit at Wimbledon .''' ) _UpperCAmelCase : Dict = (39, 42) _UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' ) _UpperCAmelCase : List[Any] = model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": _UpperCAmelCase : str = torch.Size((1, 42, 1024) ) _UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base _UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) ) _UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": _UpperCAmelCase : int = torch.Size((1, 1, 1024) ) _UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base _UpperCAmelCase : List[str] = torch.Size((1, 1, 768) ) _UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' F' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ): _UpperCAmelCase : Any = {} with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(UpperCamelCase__ ): _UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' ) _UpperCAmelCase : Tuple = index return entity_vocab if __name__ == "__main__": _lowerCAmelCase :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' 
) _lowerCAmelCase :Any = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
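# A hedged invocation sketch for the converter above; the script name and file
# paths are placeholders. Note that the entity-hidden-state shape guard uses
# `!=` where the word-hidden-state guard uses `==`; the `==` form matches the
# intent of raising on a shape mismatch.
#
#   python convert_luke_checkpoint.py \
#       --checkpoint_path luke.bin \
#       --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base \
#       --model_size base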
"""simple docstring""" import os from dataclasses import dataclass, field from io import BytesIO from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union import numpy as np import pyarrow as pa from .. import config from ..download.streaming_download_manager import xopen, xsplitext from ..table import array_cast from ..utils.py_utils import no_op_if_value_is_null, string_to_dict if TYPE_CHECKING: from .features import FeatureType _lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :List[Any] = False, False, False @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =None a__ =True a__ =True a__ =None # Automatically constructed a__ ="dict" a__ =pa.struct({'''bytes''': pa.binary(), '''path''': pa.string()} ) a__ =field(default='''Audio''' ,init=a ,repr=a ) def __call__( self ) -> Tuple: return self.pa_type def __lowerCAmelCase ( self , A ) -> dict: try: import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files. except ImportError as err: raise ImportError('''To support encoding audio data, please install \'soundfile\'.''' ) from err if isinstance(A , A ): return {"bytes": None, "path": value} elif isinstance(A , A ): return {"bytes": value, "path": None} elif "array" in value: # convert the audio array to wav bytes _UpperCAmelCase : Any = BytesIO() sf.write(A , value['''array'''] , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} elif value.get('''path''' ) is not None and os.path.isfile(value['''path'''] ): # we set "bytes": None to not duplicate the data if they're already available locally if value["path"].endswith('''pcm''' ): # "PCM" only has raw audio bytes if value.get('''sampling_rate''' ) is None: # At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate raise KeyError('''To use PCM files, please specify a \'sampling_rate\' in Audio object''' ) if value.get('''bytes''' ): # If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!) _UpperCAmelCase : Tuple = np.frombuffer(value['''bytes'''] , dtype=np.intaa ).astype(np.floataa ) / 3_2_7_6_7 else: _UpperCAmelCase : Tuple = np.memmap(value['''path'''] , dtype='''h''' , mode='''r''' ).astype(np.floataa ) / 3_2_7_6_7 _UpperCAmelCase : Union[str, Any] = BytesIO(bytes() ) sf.write(A , A , value['''sampling_rate'''] , format='''wav''' ) return {"bytes": buffer.getvalue(), "path": None} else: return {"bytes": None, "path": value.get('''path''' )} elif value.get('''bytes''' ) is not None or value.get('''path''' ) is not None: # store the audio bytes, and path is used to infer the audio format using the file extension return {"bytes": value.get('''bytes''' ), "path": value.get('''path''' )} else: raise ValueError( f'An audio sample should have one of \'path\' or \'bytes\' but they are missing or None in {value}.' ) def __lowerCAmelCase ( self , A , A = None ) -> dict: if not self.decode: raise RuntimeError('''Decoding is disabled for this feature. Please use Audio(decode=True) instead.''' ) _UpperCAmelCase , _UpperCAmelCase : Any = (value['''path'''], BytesIO(value['''bytes'''] )) if value['''bytes'''] is not None else (value['''path'''], None) if path is None and file is None: raise ValueError(f'An audio sample should have one of \'path\' or \'bytes\' but both are None in {value}.' 
) try: import librosa import soundfile as sf except ImportError as err: raise ImportError('''To support decoding audio files, please install \'librosa\' and \'soundfile\'.''' ) from err _UpperCAmelCase : Any = xsplitext(A )[1][1:].lower() if path is not None else None if not config.IS_OPUS_SUPPORTED and audio_format == "opus": raise RuntimeError( '''Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) elif not config.IS_MP3_SUPPORTED and audio_format == "mp3": raise RuntimeError( '''Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, ''' '''You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ''' ) if file is None: _UpperCAmelCase : List[str] = token_per_repo_id or {} _UpperCAmelCase : Tuple = path.split('''::''' )[-1] try: _UpperCAmelCase : List[Any] = string_to_dict(A , config.HUB_DATASETS_URL )['''repo_id'''] _UpperCAmelCase : int = token_per_repo_id[repo_id] except (ValueError, KeyError): _UpperCAmelCase : int = None with xopen(A , '''rb''' , use_auth_token=A ) as f: _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = sf.read(A ) else: _UpperCAmelCase , _UpperCAmelCase : Optional[int] = sf.read(A ) _UpperCAmelCase : List[str] = array.T if self.mono: _UpperCAmelCase : str = librosa.to_mono(A ) if self.sampling_rate and self.sampling_rate != sampling_rate: _UpperCAmelCase : int = librosa.resample(A , orig_sr=A , target_sr=self.sampling_rate ) _UpperCAmelCase : List[str] = self.sampling_rate return {"path": path, "array": array, "sampling_rate": sampling_rate} def __lowerCAmelCase ( self ) -> Union["FeatureType", Dict[str, "FeatureType"]]: from .features import Value if self.decode: raise ValueError('''Cannot flatten a decoded Audio feature.''' ) return { "bytes": Value('''binary''' ), "path": Value('''string''' ), } def __lowerCAmelCase ( self , A ) -> pa.StructArray: if pa.types.is_string(storage.type ): _UpperCAmelCase : List[str] = pa.array([None] * len(A ) , type=pa.binary() ) _UpperCAmelCase : int = pa.StructArray.from_arrays([bytes_array, storage] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_binary(storage.type ): _UpperCAmelCase : Optional[Any] = pa.array([None] * len(A ) , type=pa.string() ) _UpperCAmelCase : int = pa.StructArray.from_arrays([storage, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('''array''' ): _UpperCAmelCase : Any = pa.array([Audio().encode_example(A ) if x is not None else None for x in storage.to_pylist()] ) elif pa.types.is_struct(storage.type ): if storage.type.get_field_index('''bytes''' ) >= 0: _UpperCAmelCase : List[str] = storage.field('''bytes''' ) else: _UpperCAmelCase : int = pa.array([None] * len(A ) , type=pa.binary() ) if storage.type.get_field_index('''path''' ) >= 0: _UpperCAmelCase : Optional[Any] = storage.field('''path''' ) else: _UpperCAmelCase : Dict = pa.array([None] * len(A ) , type=pa.string() ) _UpperCAmelCase : int = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=storage.is_null() ) return array_cast(A , self.pa_type ) def __lowerCAmelCase ( self , A ) -> pa.StructArray: @no_op_if_value_is_null def path_to_bytes(A ): with xopen(A , '''rb''' ) as f: _UpperCAmelCase : Any = f.read() return bytes_ _UpperCAmelCase : Dict = pa.array( [ (path_to_bytes(x['''path'''] ) if x['''bytes'''] is None else 
x['''bytes''']) if x is not None else None for x in storage.to_pylist() ] , type=pa.binary() , ) _UpperCAmelCase : Tuple = pa.array( [os.path.basename(A ) if path is not None else None for path in storage.field('''path''' ).to_pylist()] , type=pa.string() , ) _UpperCAmelCase : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] , ['''bytes''', '''path'''] , mask=bytes_array.is_null() ) return array_cast(A , self.pa_type )
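# A minimal end-to-end sketch of the feature above; the file path is a
# placeholder and 16 kHz is an arbitrary target rate:
#
#   from datasets import Audio, Dataset
#
#   ds = Dataset.from_dict({"audio": ["path/to/clip.wav"]})
#   ds = ds.cast_column("audio", Audio(sampling_rate=16_000))
#   ds[0]["audio"]  # {"path": ..., "array": np.ndarray, "sampling_rate": 16000}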
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _lowerCAmelCase :str = object() # For specifying empty leaf dict `{}` _lowerCAmelCase :str = object() def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ): _UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): _UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def lowerCamelCase_ (UpperCamelCase__ : List[str] ): def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def lowerCamelCase_ (): return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : List[str] = _get_partition_rules() _UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} _UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
"""simple docstring""" import unittest from transformers import DonutProcessor _lowerCAmelCase :str = 'naver-clova-ix/donut-base' class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Optional[Any] = DonutProcessor.from_pretrained(A ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = { '''name''': '''John Doe''', '''age''': '''99''', '''city''': '''Atlanta''', '''state''': '''GA''', '''zip''': '''30301''', '''phone''': '''123-4567''', '''nicknames''': [{'''nickname''': '''Johnny'''}, {'''nickname''': '''JD'''}], } _UpperCAmelCase : Optional[int] = ( '''<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>''' '''<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>''' '''<s_nicknames><s_nickname>Johnny</s_nickname>''' '''<sep/><s_nickname>JD</s_nickname></s_nicknames>''' ) _UpperCAmelCase : Any = self.processor.tokenajson(A ) self.assertDictEqual(A , A )
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : str = pipeline( task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' ) _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @slow @require_torch def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Union[str, Any] = pipeline( task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , ) # This is an audio of a dog _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ] , ) _UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) _UpperCAmelCase : Tuple = audio_classifier( [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> int: pass
"""simple docstring""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ): # Load configuration defined in the metadata file with open(UpperCamelCase__ ) as metadata_file: _UpperCAmelCase : Dict = json.load(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' ) # Load the entity vocab file _UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens _UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.' _UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name] _UpperCAmelCase : Tuple = state_dict[prefix + matrix_name] _UpperCAmelCase : str = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']] _UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval() _UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. 
Expected only missing embeddings.position_ids' ) if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )): raise ValueError( '''Unexpected keys''' F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' ) # Check outputs _UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' ) _UpperCAmelCase : List[str] = ( '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the''' ''' new world number one avoid a humiliating second- round exit at Wimbledon .''' ) _UpperCAmelCase : Dict = (39, 42) _UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' ) _UpperCAmelCase : List[Any] = model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": _UpperCAmelCase : str = torch.Size((1, 42, 1024) ) _UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base _UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) ) _UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": _UpperCAmelCase : int = torch.Size((1, 1, 1024) ) _UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base _UpperCAmelCase : List[str] = torch.Size((1, 1, 768) ) _UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' F' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ): _UpperCAmelCase : Any = {} with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(UpperCamelCase__ ): _UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' ) _UpperCAmelCase : Tuple = index return entity_vocab if __name__ == "__main__": _lowerCAmelCase :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' 
) _lowerCAmelCase :Any = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
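# Example invocation of the conversion script above (the script filename and
# all paths are hypothetical placeholders; the flags match the argparse
# definition shown):
#
#   python convert_luke_original_pytorch_checkpoint_to_pytorch.py \
#       --checkpoint_path ./luke/pytorch_model.bin \
#       --metadata_path ./luke/metadata.json \
#       --entity_vocab_path ./luke/entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-converted \
#       --model_size base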
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _lowerCAmelCase :Tuple = logging.getLogger(__name__) def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ): def get_dataset(UpperCamelCase__ : List[str] ): _UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) _UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ ) _UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 ) _UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ): _UpperCAmelCase : Tuple = [] for epoch in range(UpperCamelCase__ ): # Train quickly model.train() for batch in dataloader: _UpperCAmelCase , _UpperCAmelCase : Dict = batch _UpperCAmelCase : int = model(UpperCamelCase__ ) _UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ ) accelerator.backward(UpperCamelCase__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self ) -> List[Any]: super().__init__() _UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) ) _UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) ) def __lowerCAmelCase ( self , A ) -> Tuple: return x * self.a + self.b class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : int = DummyModel() _UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders() _UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( A , A , A , A ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __lowerCAmelCase ( self ) -> List[str]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : Optional[Any] = DummyModel() _UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders() # Train baseline _UpperCAmelCase : Optional[int] = Accelerator() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : 
List[str] = accelerator.prepare( A , A , A , A ) # Save initial _UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' ) accelerator.save_state(A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item() _UpperCAmelCase : str = optimizer.state_dict() _UpperCAmelCase : Tuple = train(3 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : List[Any] = optimizer.state_dict() # Train partially set_seed(4_2 ) _UpperCAmelCase : Dict = DummyModel() _UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders() _UpperCAmelCase : Tuple = Accelerator() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare( A , A , A , A ) accelerator.load_state(A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : List[str] = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) _UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A ) # Save everything _UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' ) accelerator.save_state(A ) # Load everything back in and make sure all states work accelerator.load_state(A ) test_rands += train(1 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : Dict = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : List[Any] = DummyModel() _UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare( A , A , A , A ) # Save initial accelerator.save_state() ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : Dict = optimizer.state_dict() _UpperCAmelCase : int = train(3 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : Union[str, Any] = optimizer.state_dict() # Train partially set_seed(4_2 ) _UpperCAmelCase : List[Any] = DummyModel() _UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A ) _UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( A , A , A , A ) accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : str = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) _UpperCAmelCase : List[str] = train(2 , A , A , A , A ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work 
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item() _UpperCAmelCase : Tuple = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] ) _UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] ) _UpperCAmelCase : Optional[int] = DummyModel() _UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() ) _UpperCAmelCase : Optional[int] = Accelerator() with self.assertRaises(A ) as ve: accelerator.register_for_checkpointing(A , A , A , A ) _UpperCAmelCase : Dict = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : Tuple = DummyModel() _UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 ) _UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare( A , A , A , A , A ) # Save initial accelerator.save_state() _UpperCAmelCase : List[str] = scheduler.state_dict() train(3 , A , A , A , A , A ) self.assertNotEqual(A , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(A , scheduler.state_dict() ) def __lowerCAmelCase ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : int = DummyModel() _UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 ) # Train baseline _UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase : Optional[Any] = accelerator.prepare(A ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(A , env=os.environ.copy() ) if __name__ == "__main__": _lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing' _lowerCAmelCase :Any = DummyModel() _lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3) _lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders() _lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, 
project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the initial optimizer is loaded on the GPU for group in optimizer.param_groups: _lowerCAmelCase :int = group['params'][0].device break assert param_device.type == accelerator.device.type _lowerCAmelCase :Dict = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: _lowerCAmelCase :List[Any] = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: _lowerCAmelCase :Union[str, Any] = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
"""simple docstring""" import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'): _lowerCAmelCase :Optional[int] = True from torch.cuda.amp import autocast _lowerCAmelCase :Any = logging.getLogger(__name__) @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =field( metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} ) a__ =field( default=a ,metadata={'''help''': '''Where do you want to store the pretrained models downloaded from huggingface.co'''} ,) a__ =field( default=a ,metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} ) a__ =field( default=a ,metadata={'''help''': '''Whether to log verbose messages or not.'''} ,) a__ =field( default=2.0 ,metadata={'''help''': '''Maximum temperature for gumbel softmax.'''} ) a__ =field( default=0.5 ,metadata={'''help''': '''Minimum temperature for gumbel softmax.'''} ) a__ =field( default=0.999995 ,metadata={'''help''': '''Decay of gumbel temperature during training.'''} ) def lowerCamelCase_ (UpperCamelCase__ : ModelArguments , UpperCamelCase__ : TrainingArguments ): logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , handlers=[logging.StreamHandler(sys.stdout )] , ) _UpperCAmelCase : str = logging.WARNING if model_args.verbose_logging: _UpperCAmelCase : Union[str, Any] = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): _UpperCAmelCase : int = logging.INFO logger.setLevel(UpperCamelCase__ ) @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =field( default=a ,metadata={'''help''': '''The name of the dataset to use (via the datasets library).'''} ) a__ =field( default=a ,metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} ) a__ =field( default='''train''' ,metadata={ '''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\'''' } ,) a__ =field( default='''validation''' ,metadata={ '''help''': ( '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'''' ) } ,) a__ =field( default='''file''' ,metadata={'''help''': '''Column in the dataset that contains speech file path. 
Defaults to \'file\''''} ,) a__ =field( default=a ,metadata={'''help''': '''Overwrite the cached preprocessed datasets or not.'''} ) a__ =field( default=1 ,metadata={ '''help''': '''The percentage of the train set used as validation set in case there\'s no validation split''' } ,) a__ =field( default=a ,metadata={'''help''': '''The number of processes to use for the preprocessing.'''} ,) a__ =field( default=20.0 ,metadata={'''help''': '''Filter audio files that are longer than `max_duration_in_seconds` seconds'''} ) @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =42 a__ =42 a__ ="longest" a__ =None a__ =None def __call__( self , A ) -> Dict[str, torch.Tensor]: # reformat list to dict and set to pytorch format _UpperCAmelCase : Optional[int] = self.feature_extractor.pad( A , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) _UpperCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] ) _UpperCAmelCase : Optional[int] = batch['''input_values'''].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula _UpperCAmelCase : Tuple = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to( torch.long ) _UpperCAmelCase : str = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device ) # these two operations makes sure that all values # before the output lengths indices are attended to _UpperCAmelCase : Any = 1 _UpperCAmelCase : str = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices _UpperCAmelCase : Dict = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=A , min_masks=2 , ) return batch class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , *A , A=1 , A=0 , A=1.0 , **A ) -> List[str]: super().__init__(*A , **A ) _UpperCAmelCase : str = 0 _UpperCAmelCase : List[str] = max_gumbel_temp _UpperCAmelCase : List[Any] = min_gumbel_temp _UpperCAmelCase : str = gumbel_temp_decay def __lowerCAmelCase ( self , A , A ) -> torch.Tensor: model.train() _UpperCAmelCase : Union[str, Any] = self._prepare_inputs(A ) if self.use_amp: with autocast(): _UpperCAmelCase : Tuple = self.compute_loss(A , A ) else: _UpperCAmelCase : List[str] = self.compute_loss(A , A ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": _UpperCAmelCase : str = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": _UpperCAmelCase : Tuple = loss.sum() / (inputs['''mask_time_indices''']).sum() else: raise ValueError(f'{model.config.ctc_loss_reduction} is not valid. 
Choose one of [\'mean\', \'sum\']' ) if self.args.gradient_accumulation_steps > 1: _UpperCAmelCase : Union[str, Any] = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(A ).backward() elif self.use_apex: with amp.scale_loss(A , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(A ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def lowerCamelCase_ (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _UpperCAmelCase : Optional[int] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = parser.parse_args_into_dataclasses() configure_logger(UpperCamelCase__ , UpperCamelCase__ ) # Downloading and loading a dataset from the hub. _UpperCAmelCase : str = load_dataset(data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" _UpperCAmelCase : List[str] = DatasetDict() _UpperCAmelCase : Optional[int] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[:{data_args.validation_split_percentage}%]' , cache_dir=model_args.cache_dir , ) _UpperCAmelCase : Optional[Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}[{data_args.validation_split_percentage}%:]' , cache_dir=model_args.cache_dir , ) else: # make sure only "validation" and "train" keys remain" _UpperCAmelCase : List[str] = DatasetDict() _UpperCAmelCase : Union[str, Any] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split='''validation''' , cache_dir=model_args.cache_dir , ) _UpperCAmelCase : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=F'{data_args.train_split_name}' , cache_dir=model_args.cache_dir , ) # only normalized-inputs-training is supported _UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , do_normalize=UpperCamelCase__ ) def prepare_dataset(UpperCamelCase__ : Optional[int] ): # check that all files have the correct sampling rate _UpperCAmelCase , _UpperCAmelCase : str = librosa.load(batch[data_args.speech_file_column] , sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays _UpperCAmelCase : Dict = datasets.map( UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=datasets['''train'''].column_names ) # filter audio files that are too long _UpperCAmelCase : Tuple = vectorized_datasets.filter( lambda UpperCamelCase__ : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(UpperCamelCase__ : Tuple ): return feature_extractor(batch['''speech'''] , sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` 
_UpperCAmelCase : int = vectorized_datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , remove_columns=vectorized_datasets['''train'''].column_names , ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 _UpperCAmelCase : Optional[Any] = WavaVecaConfig.from_pretrained( model_args.model_name_or_path , cache_dir=model_args.cache_dir , gradient_checkpointing=training_args.gradient_checkpointing , ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and''' ''' ``config.feat_extract_norm=\'layer\'''' ) _UpperCAmelCase : Optional[Any] = WavaVecaForPreTraining(UpperCamelCase__ ) _UpperCAmelCase : Any = DataCollatorForWavaVecaPretraining(model=UpperCamelCase__ , feature_extractor=UpperCamelCase__ ) _UpperCAmelCase : Tuple = WavaVecaPreTrainer( model=UpperCamelCase__ , data_collator=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=vectorized_datasets['''train'''] , eval_dataset=vectorized_datasets['''validation'''] , tokenizer=UpperCamelCase__ , max_gumbel_temp=model_args.max_gumbel_temperature , min_gumbel_temp=model_args.min_gumbel_temperature , gumbel_temp_decay=model_args.gumbel_temperature_decay , ) trainer.train() if __name__ == "__main__": main()
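# Example launch of the pretraining script above (the script filename, model
# and dataset names are illustrative; the flag names follow the dataclass
# fields the code reads, e.g. model_args.model_name_or_path and
# data_args.dataset_name, plus the standard TrainingArguments --output_dir):
#
#   python run_wav2vec2_pretraining.py \
#       --model_name_or_path facebook/wav2vec2-base \
#       --dataset_name librispeech_asr \
#       --dataset_config_name clean \
#       --output_dir ./wav2vec2-pretrained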
"""simple docstring""" from __future__ import annotations def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : list[str] | None = None , UpperCamelCase__ : dict[str, float] | None = None , UpperCamelCase__ : bool = False , ): _UpperCAmelCase : str = cipher_alphabet or [chr(UpperCamelCase__ ) for i in range(97 , 123 )] # If the argument is None or the user provided an empty dictionary if not frequencies_dict: # Frequencies of letters in the english language (how much they show up) _UpperCAmelCase : Optional[int] = { '''a''': 0.0_8497, '''b''': 0.0_1492, '''c''': 0.0_2202, '''d''': 0.0_4253, '''e''': 0.1_1162, '''f''': 0.0_2228, '''g''': 0.0_2015, '''h''': 0.0_6094, '''i''': 0.0_7546, '''j''': 0.0_0153, '''k''': 0.0_1292, '''l''': 0.0_4025, '''m''': 0.0_2406, '''n''': 0.0_6749, '''o''': 0.0_7507, '''p''': 0.0_1929, '''q''': 0.0_0095, '''r''': 0.0_7587, '''s''': 0.0_6327, '''t''': 0.0_9356, '''u''': 0.0_2758, '''v''': 0.0_0978, '''w''': 0.0_2560, '''x''': 0.0_0150, '''y''': 0.0_1994, '''z''': 0.0_0077, } else: # Custom frequencies dictionary _UpperCAmelCase : List[Any] = frequencies_dict if not case_sensitive: _UpperCAmelCase : Tuple = ciphertext.lower() # Chi squared statistic values _UpperCAmelCase : dict[int, tuple[float, str]] = {} # cycle through all of the shifts for shift in range(len(UpperCamelCase__ ) ): _UpperCAmelCase : Dict = '''''' # decrypt the message with the shift for letter in ciphertext: try: # Try to index the letter in the alphabet _UpperCAmelCase : Any = (alphabet_letters.index(letter.lower() ) - shift) % len( UpperCamelCase__ ) decrypted_with_shift += ( alphabet_letters[new_key].upper() if case_sensitive and letter.isupper() else alphabet_letters[new_key] ) except ValueError: # Append the character if it isn't in the alphabet decrypted_with_shift += letter _UpperCAmelCase : Union[str, Any] = 0.0 # Loop through each letter in the decoded message with the shift for letter in decrypted_with_shift: if case_sensitive: _UpperCAmelCase : Union[str, Any] = letter.lower() if letter in frequencies: # Get the amount of times the letter occurs in the message _UpperCAmelCase : str = decrypted_with_shift.lower().count(UpperCamelCase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies _UpperCAmelCase : List[Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula _UpperCAmelCase : int = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value else: if letter.lower() in frequencies: # Get the amount of times the letter occurs in the message _UpperCAmelCase : int = decrypted_with_shift.count(UpperCamelCase__ ) # Get the excepcted amount of times the letter should appear based # on letter frequencies _UpperCAmelCase : Union[str, Any] = frequencies[letter] * occurrences # Complete the chi squared statistic formula _UpperCAmelCase : str = ((occurrences - expected) ** 2) / expected # Add the margin of error to the total chi squared statistic chi_squared_statistic += chi_letter_value # Add the data to the chi_squared_statistic_values dictionary _UpperCAmelCase : int = ( chi_squared_statistic, decrypted_with_shift, ) # Get the most likely cipher by finding the cipher with the smallest chi squared # statistic def chi_squared_statistic_values_sorting_key(UpperCamelCase__ : int ) -> tuple[float, str]: return chi_squared_statistic_values[key] _UpperCAmelCase : int = min( UpperCamelCase__ , key=UpperCamelCase__ , ) # Get all 
the data from the most likely cipher (key, decoded message) ( ( _UpperCAmelCase ) , ( _UpperCAmelCase ) , ) : Optional[Any] = chi_squared_statistic_values[most_likely_cipher] # Return the data on the most likely shift return ( most_likely_cipher, most_likely_cipher_chi_squared_value, decoded_most_likely_cipher, )
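# Quick sanity check of the helper above; the expected shift/plaintext pair
# follows the upstream doctest for this function:
if __name__ == "__main__":
    shift, chi_squared_value, decoded = decrypt_caesar_with_chi_squared("crybd cdbsxq")
    print(shift, decoded)  # expected: 10 short string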
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import is_tensorflow_text_available, is_tf_available from transformers.testing_utils import require_tensorflow_text, require_tf, slow from ..test_modeling_tf_common import floats_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_tf_available(): import tensorflow as tf from transformers import ( AutoTokenizer, TFAutoModelForCausalLM, TFAutoModelForSeqaSeqLM, TFAutoModelForSpeechSeqaSeq, TFAutoModelForVisionaSeq, TFBartForConditionalGeneration, TFLogitsProcessorList, TFMinLengthLogitsProcessor, tf_top_k_top_p_filtering, ) if is_tensorflow_text_available(): import tensorflow_text as text @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : Tuple = tf.convert_to_tensor( [ [ 8.2_220_991, # 3rd highest value; idx. 0 -0.5_620_044, 5.23_229_752, 4.0_386_393, -6.8_798_378, -0.54_785_802, -3.2_012_153, 2.92_777_176, 1.88_171_953, 7.35_341_276, # 5th highest value; idx. 9 8.43_207_833, # 2nd highest value; idx. 10 -9.85_711_836, -5.96_209_236, -1.13_039_161, -7.1_115_294, -0.8_369_633, -5.3_186_408, 7.06_427_407, 0.81_369_344, -0.82_023_817, -5.9_179_796, 0.58_813_443, -6.99_778_438, 4.71_551_189, -0.18_771_637, 7.44_020_759, # 4th highest value; idx. 25 9.38_450_987, # 1st highest value; idx. 26 2.12_662_941, -9.32_562_038, 2.35_652_522, ], # cummulative prob of 5 highest values <= 0.6 [ 0.58_425_518, 4.53_139_238, -5.57_510_464, -6.28_030_699, -7.19_529_503, -4.02_122_551, 1.39_337_037, -6.06_707_057, 1.59_480_517, -9.643_119, 0.03_907_799, 0.67_231_762, -8.88_206_726, 6.27_115_922, # 4th highest value; idx. 13 2.28_520_723, 4.82_767_506, 4.30_421_368, 8.8_275_313, # 2nd highest value; idx. 17 5.44_029_958, # 5th highest value; idx. 18 -4.4_735_794, 7.38_579_536, # 3rd highest value; idx. 20 -2.91_051_663, 2.61_946_077, -2.5_674_762, -9.48_959_302, -4.02_922_645, -1.35_416_918, 9.67_702_323, # 1st highest value; idx. 
27 -5.89_478_553, 1.85_370_467, ], # cummulative prob of 5 highest values <= 0.6 ] , dtype=tf.floataa , ) _UpperCAmelCase : Optional[int] = tf.convert_to_tensor( [[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above _UpperCAmelCase : Union[str, Any] = tf.convert_to_tensor( [8.222_099, 7.3_534_126, 8.432_078, 7.4_402_075, 9.38_451, 6.271_159, 8.827_531, 5.4_402_995, 7.3_857_956, 9.677_023] , dtype=tf.floataa , ) # expected non filtered values as noted above _UpperCAmelCase : Optional[Any] = tf_top_k_top_p_filtering(A , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4 ) _UpperCAmelCase : Tuple = output[output != -float('''inf''' )] _UpperCAmelCase : Union[str, Any] = tf.cast( tf.where(tf.not_equal(A , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , ) tf.debugging.assert_near(A , A , rtol=1E-12 ) tf.debugging.assert_equal(A , A ) @require_tf class _UpperCAmelCase ( unittest.TestCase ,a ): '''simple docstring''' if is_tf_available(): a__ ={ '''AutoModelForCausalLM''': TFAutoModelForCausalLM, '''AutoModelForSpeechSeq2Seq''': TFAutoModelForSpeechSeqaSeq, '''AutoModelForSeq2SeqLM''': TFAutoModelForSeqaSeqLM, '''AutoModelForVision2Seq''': TFAutoModelForVisionaSeq, '''LogitsProcessorList''': TFLogitsProcessorList, '''MinLengthLogitsProcessor''': TFMinLengthLogitsProcessor, '''create_tensor_fn''': tf.convert_to_tensor, '''floats_tensor''': floats_tensor, '''return_tensors''': '''tf''', } @slow def __lowerCAmelCase ( self ) -> Union[str, Any]: # TF-only test: tf.saved_model export _UpperCAmelCase : List[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : int = 2 _UpperCAmelCase : int = 2 class _UpperCAmelCase ( tf.Module ): '''simple docstring''' def __init__( self , A ) -> Optional[Any]: super(A , self ).__init__() _UpperCAmelCase : int = model @tf.function( input_signature=( tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=A , ) def __lowerCAmelCase ( self , A , A ) -> Dict: _UpperCAmelCase : List[Any] = self.model.generate( input_ids=A , attention_mask=A , max_new_tokens=A , return_dict_in_generate=A , ) return {"sequences": outputs["sequences"]} _UpperCAmelCase : List[Any] = [[2, 0], [1_0_2, 1_0_3]] _UpperCAmelCase : int = [[1, 0], [1, 1]] _UpperCAmelCase : Any = DummyModel(model=A ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(A , A , signatures={'''serving_default''': dummy_model.serving} ) _UpperCAmelCase : Any = tf.saved_model.load(A ).signatures['''serving_default'''] for batch_size in range(1 , len(A ) + 1 ): _UpperCAmelCase : str = { '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ), '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ), } _UpperCAmelCase : int = serving_func(**A )['''sequences'''] _UpperCAmelCase : int = test_model.generate(**A , max_new_tokens=A ) tf.debugging.assert_equal(A , A ) @slow def __lowerCAmelCase ( self ) -> Any: # TF-only test: tf.saved_model export _UpperCAmelCase : List[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : str = 1 _UpperCAmelCase : List[Any] = 2 class _UpperCAmelCase ( tf.Module ): '''simple docstring''' def __init__( self , A ) -> int: super(A , self ).__init__() _UpperCAmelCase : Dict = model @tf.function( input_signature=( 
tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ), tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ), ) , jit_compile=A , ) def __lowerCAmelCase ( self , A , A ) -> Dict: _UpperCAmelCase : Tuple = self.model.generate( input_ids=A , attention_mask=A , max_new_tokens=A , return_dict_in_generate=A , ) return {"sequences": outputs["sequences"]} _UpperCAmelCase : List[Any] = [[2], [1_0_2, 1_0_3]] _UpperCAmelCase : Union[str, Any] = [[1], [1, 1]] _UpperCAmelCase : str = DummyModel(model=A ) with tempfile.TemporaryDirectory() as tmp_dir: tf.saved_model.save(A , A , signatures={'''serving_default''': dummy_model.serving} ) _UpperCAmelCase : Tuple = tf.saved_model.load(A ).signatures['''serving_default'''] for input_row in range(len(A ) ): _UpperCAmelCase : str = { '''input_ids''': tf.constant([dummy_input_ids[input_row]] ), '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ), } _UpperCAmelCase : int = serving_func(**A )['''sequences'''] _UpperCAmelCase : Any = test_model.generate(**A , max_new_tokens=A ) tf.debugging.assert_equal(A , A ) @slow @require_tensorflow_text def __lowerCAmelCase ( self ) -> Any: # TF-only test: tf.saved_model export with tempfile.TemporaryDirectory() as tmp_dir: # file needed to load the TF tokenizer hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=A ) class _UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self ) -> List[str]: super().__init__() _UpperCAmelCase : Dict = text.SentencepieceTokenizer( model=tf.io.gfile.GFile(os.path.join(A , '''spiece.model''' ) , '''rb''' ).read() ) _UpperCAmelCase : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' ) def __lowerCAmelCase ( self , A , *A , **A ) -> Any: _UpperCAmelCase : Any = self.tokenizer.tokenize(A ) _UpperCAmelCase , _UpperCAmelCase : List[str] = text.pad_model_inputs( A , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id ) _UpperCAmelCase : List[str] = self.model.generate(input_ids=A , attention_mask=A ) return self.tokenizer.detokenize(A ) _UpperCAmelCase : List[str] = CompleteSentenceTransformer() _UpperCAmelCase : List[Any] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' ) _UpperCAmelCase : str = complete_model(A ) _UpperCAmelCase : int = tf.keras.Model(A , A ) keras_model.save(A ) def __lowerCAmelCase ( self ) -> Tuple: # Has PT equivalent: this test relies on random sampling _UpperCAmelCase : Tuple = { '''do_sample''': True, '''num_beams''': 1, '''top_p''': 0.7, '''top_k''': 1_0, '''temperature''': 0.7, } _UpperCAmelCase : Optional[Any] = 1_4 _UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Optional[int] = '''Hello, my dog is cute and''' _UpperCAmelCase : str = tokenizer(A , return_tensors='''tf''' ) _UpperCAmelCase : int = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Optional[Any] = 6_3_8 # forces the generation to happen on CPU, to avoid GPU-related quirks with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) _UpperCAmelCase : List[Any] = model.generate(**A , eos_token_id=A , **A ) self.assertTrue(expectation == len(generated_tokens[0] ) ) _UpperCAmelCase : List[str] = [6_3_8, 1_9_8] with tf.device(''':/CPU:0''' ): tf.random.set_seed(0 ) _UpperCAmelCase : Union[str, Any] = model.generate(**A , eos_token_id=A , **A ) self.assertTrue(expectation == 
len(generated_tokens[0] ) ) def __lowerCAmelCase ( self ) -> str: # Has PT equivalent: ample use of framework-specific code _UpperCAmelCase : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) _UpperCAmelCase : Union[str, Any] = '''Hugging Face is a technology company based in New York and Paris.''' _UpperCAmelCase : List[str] = bart_tokenizer(A , return_tensors='''tf''' ).input_ids _UpperCAmelCase : List[Any] = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) _UpperCAmelCase : str = bart_model.generate(A ).numpy() class _UpperCAmelCase ( a ): '''simple docstring''' def __lowerCAmelCase ( self , A , A=None , **A ) -> Optional[int]: return super().call(A , **A ) _UpperCAmelCase : List[str] = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' ) _UpperCAmelCase : Union[str, Any] = bart_model.generate(A , foo='''bar''' ).numpy() self.assertTrue(np.array_equal(A , A ) ) class _UpperCAmelCase ( bart_model.model.encoder.__class__ ): '''simple docstring''' def __lowerCAmelCase ( self , A , **A ) -> List[Any]: return super().call(A , **A ) _UpperCAmelCase : List[Any] = FakeEncoder(bart_model.config , bart_model.model.shared ) _UpperCAmelCase : Optional[int] = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) _UpperCAmelCase : int = bart_model.generate(A ).numpy() with self.assertRaises(A ): # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo" bart_model.generate(A , foo='''bar''' )
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =IFImgaImgSuperResolutionPipeline a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) a__ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ) -> List[str]: return self._get_superresolution_dummy_components() def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Any = torch.manual_seed(A ) else: _UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : List[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase :Any = logging.get_logger(__name__) _lowerCAmelCase :Optional[Any] = {'vocab_file': 'sentencepiece.bpe.model'} _lowerCAmelCase :List[Any] = { 'vocab_file': { 'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model', } } _lowerCAmelCase :int = { 'camembert-base': 512, } _lowerCAmelCase :List[str] = '▁' class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =['''input_ids''', '''attention_mask'''] def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A=["<s>NOTUSED", "</s>NOTUSED"] , A = None , **A , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _UpperCAmelCase : Optional[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token _UpperCAmelCase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , additional_special_tokens=A , sp_model_kwargs=self.sp_model_kwargs , **A , ) _UpperCAmelCase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) _UpperCAmelCase : Any = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> _UpperCAmelCase : int = {'''<s>NOTUSED''': 0, '''<pad>''': 1, '''</s>NOTUSED''': 2, '''<unk>''': 3} _UpperCAmelCase : Optional[Any] = len(self.fairseq_tokens_to_ids ) _UpperCAmelCase : List[str] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) _UpperCAmelCase : List[str] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __lowerCAmelCase ( self , A , A = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase : List[str] = [self.cls_token_id] _UpperCAmelCase : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : Optional[Any] = [self.sep_token_id] _UpperCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCAmelCase ( self ) -> str: return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , A ) -> List[str]: return self.sp_model.encode(A , out_type=A ) def __lowerCAmelCase ( self , A ) -> List[str]: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(A ) == 0: 
# Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(A ) def __lowerCAmelCase ( self , A ) -> List[Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self , A ) -> List[str]: _UpperCAmelCase : List[str] = [] _UpperCAmelCase : Optional[int] = '''''' _UpperCAmelCase : Optional[int] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(A ) + token _UpperCAmelCase : str = True _UpperCAmelCase : List[str] = [] else: current_sub_tokens.append(A ) _UpperCAmelCase : Optional[int] = False out_string += self.sp_model.decode(A ) return out_string.strip() def __getstate__( self ) -> Tuple: _UpperCAmelCase : str = self.__dict__.copy() _UpperCAmelCase : int = None return state def __setstate__( self , A ) -> int: _UpperCAmelCase : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase : Any = {} _UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: if not os.path.isdir(A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase : Optional[int] = os.path.join( A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A ) elif not os.path.isfile(self.vocab_file ): with open(A , '''wb''' ) as fi: _UpperCAmelCase : List[Any] = self.sp_model.serialized_model_proto() fi.write(A ) return (out_vocab_file,)
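

# A brief usage sketch (not part of the original file). The obfuscated class
# above is CamemBERT's slow SentencePiece tokenizer; `CamembertTokenizer` is the
# assumed public name, matching the '''camembert-base''' checkpoint it references.
from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained('''camembert-base''')
encoding = tokenizer('''Bonjour le monde''')
# Single sequences are wrapped as <s> ... </s>; pairs as <s> A </s></s> B </s>,
# matching the special-token insertion logic above.
print(tokenizer.convert_ids_to_tokens(encoding['''input_ids''']))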
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int ): if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) _UpperCAmelCase : str = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_big_bird import BigBirdTokenizer else: _lowerCAmelCase :Tuple = None _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) _lowerCAmelCase :str = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'} _lowerCAmelCase :List[Any] = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), }, 'tokenizer_file': { 'google/bigbird-roberta-base': ( 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json' ), 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json' ), }, } _lowerCAmelCase :Optional[int] = { 'google/bigbird-roberta-base': 4_096, 'google/bigbird-roberta-large': 4_096, 'google/bigbird-base-trivia-itc': 4_096, } _lowerCAmelCase :Optional[int] = '▁' class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =BigBirdTokenizer a__ =['''input_ids''', '''attention_mask'''] a__ =[] def __init__( self , A=None , A=None , A="<unk>" , A="<s>" , A="</s>" , A="<pad>" , A="[SEP]" , A="[MASK]" , A="[CLS]" , **A , ) -> int: _UpperCAmelCase : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else bos_token _UpperCAmelCase : int = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else eos_token _UpperCAmelCase : List[Any] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else unk_token _UpperCAmelCase : str = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else pad_token _UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else cls_token _UpperCAmelCase : List[str] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it _UpperCAmelCase : List[str] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token super().__init__( A , tokenizer_file=A , bos_token=A , eos_token=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , **A , ) _UpperCAmelCase : Dict = vocab_file _UpperCAmelCase : Optional[int] = False if not self.vocab_file else True def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : Union[str, Any] = [self.sep_token_id] _UpperCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: if token_ids_a is not None: raise ValueError( '''You should not supply a second sequence if the provided sequence of ''' '''ids is already formatted with special tokens for the model.''' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1] + ([0] * len(A )) + [1] def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : str = [self.sep_token_id] _UpperCAmelCase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow ''' '''tokenizer.''' ) if not os.path.isdir(A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase : str = os.path.join( A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ): copyfile(self.vocab_file , A ) return (out_vocab_file,)
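

# A brief usage sketch (not part of the original file); `BigBirdTokenizerFast`
# is the assumed public name for the class above.
from transformers import BigBirdTokenizerFast

tokenizer = BigBirdTokenizerFast.from_pretrained('''google/bigbird-roberta-base''')
# Pairs come back as [CLS] A [SEP] B [SEP], per the special-token logic above.
print(tokenizer('''a question''', '''some context''')['''input_ids'''])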
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ): _UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ ) _UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model'''] _UpperCAmelCase : Optional[Any] = list(state_dict.keys() ) # extract state_dict for VQVAE _UpperCAmelCase : Any = {} _UpperCAmelCase : Any = '''first_stage_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] # extract state_dict for UNetLDM _UpperCAmelCase : Tuple = {} _UpperCAmelCase : int = '''model.diffusion_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] _UpperCAmelCase : List[str] = config.model.params.first_stage_config.params _UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params _UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval() vqvae.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval() unet.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : int = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , ) _UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipeline.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) _lowerCAmelCase :List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : Optional[int] ): _UpperCAmelCase : str = [0 for i in range(r + 1 )] # nc0 = 1 _UpperCAmelCase : Union[str, Any] = 1 for i in range(1 , n + 1 ): # to compute current row from previous row. _UpperCAmelCase : Union[str, Any] = min(UpperCamelCase__ , UpperCamelCase__ ) while j > 0: c[j] += c[j - 1] j -= 1 return c[r] print(binomial_coefficient(n=10, r=5))
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :List[str] = logging.get_logger(__name__) _lowerCAmelCase :Any = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''falcon''' a__ =['''past_key_values'''] def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any: _UpperCAmelCase : int = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A ) _UpperCAmelCase : int = hidden_size if n_embed is None else n_embed _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Optional[int] = layer_norm_epsilon _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : Optional[int] = use_cache _UpperCAmelCase : Any = hidden_dropout _UpperCAmelCase : Dict = attention_dropout _UpperCAmelCase : Any = bos_token_id _UpperCAmelCase : List[Any] = eos_token_id _UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase : Dict = alibi _UpperCAmelCase : Optional[int] = new_decoder_architecture _UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase : Optional[int] = parallel_attn _UpperCAmelCase : Optional[int] = bias super().__init__(bos_token_id=A , eos_token_id=A , **A ) @property def __lowerCAmelCase ( self ) -> List[str]: return self.hidden_size // self.num_attention_heads @property def __lowerCAmelCase ( self ) -> List[Any]: return not self.alibi
"""simple docstring""" from math import factorial _lowerCAmelCase :Dict = {str(d): factorial(d) for d in range(10)} def lowerCamelCase_ (UpperCamelCase__ : int ): return sum(DIGIT_FACTORIAL[d] for d in str(UpperCamelCase__ ) ) def lowerCamelCase_ (): _UpperCAmelCase : Optional[Any] = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , UpperCamelCase__ ) if sum_of_digit_factorial(UpperCamelCase__ ) == i ) if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowerCAmelCase :int = ['small', 'medium', 'large'] _lowerCAmelCase :int = 'lm_head.decoder.weight' _lowerCAmelCase :Dict = 'lm_head.weight' def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ): _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ ) _UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": _lowerCAmelCase :Dict = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) _lowerCAmelCase :str = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") _lowerCAmelCase :int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
"""simple docstring""" import inspect import unittest import warnings from math import ceil, floor from transformers import LevitConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING, MODEL_MAPPING, LevitForImageClassification, LevitForImageClassificationWithTeacher, LevitModel, ) from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class _UpperCAmelCase ( a ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : str = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(A , '''hidden_sizes''' ) ) self.parent.assertTrue(hasattr(A , '''num_attention_heads''' ) ) class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A=1_3 , A=6_4 , A=3 , A=3 , A=2 , A=1 , A=1_6 , A=[1_2_8, 2_5_6, 3_8_4] , A=[4, 6, 8] , A=[2, 3, 4] , A=[1_6, 1_6, 1_6] , A=0 , A=[2, 2, 2] , A=[2, 2, 2] , A=0.02 , A=True , A=True , A=2 , ) -> Union[str, Any]: _UpperCAmelCase : str = parent _UpperCAmelCase : Optional[int] = batch_size _UpperCAmelCase : int = image_size _UpperCAmelCase : List[str] = num_channels _UpperCAmelCase : Optional[int] = kernel_size _UpperCAmelCase : str = stride _UpperCAmelCase : Union[str, Any] = padding _UpperCAmelCase : Any = hidden_sizes _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Any = depths _UpperCAmelCase : Optional[Any] = key_dim _UpperCAmelCase : Any = drop_path_rate _UpperCAmelCase : Optional[int] = patch_size _UpperCAmelCase : Optional[int] = attention_ratio _UpperCAmelCase : Any = mlp_ratio _UpperCAmelCase : Optional[Any] = initializer_range _UpperCAmelCase : Dict = [ ['''Subsample''', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['''Subsample''', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] _UpperCAmelCase : List[str] = is_training _UpperCAmelCase : Dict = use_labels _UpperCAmelCase : Tuple = num_labels _UpperCAmelCase : Optional[Any] = initializer_range def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) _UpperCAmelCase : Optional[Any] = None if self.use_labels: _UpperCAmelCase : Any = ids_tensor([self.batch_size] , self.num_labels ) _UpperCAmelCase : Optional[int] = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self ) -> Dict: return LevitConfig( image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , ) def __lowerCAmelCase ( self , A , A , A ) -> Optional[int]: _UpperCAmelCase : List[Any] = LevitModel(config=A ) model.to(A ) model.eval() 
_UpperCAmelCase : Union[str, Any] = model(A ) _UpperCAmelCase : Any = (self.image_size, self.image_size) _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = image_size[0], image_size[1] for _ in range(4 ): _UpperCAmelCase : int = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) _UpperCAmelCase : int = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , ) def __lowerCAmelCase ( self , A , A , A ) -> Dict: _UpperCAmelCase : List[Any] = self.num_labels _UpperCAmelCase : List[str] = LevitForImageClassification(A ) model.to(A ) model.eval() _UpperCAmelCase : Optional[int] = model(A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = config_and_inputs _UpperCAmelCase : List[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =( (LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher) if is_torch_available() else () ) a__ =( { '''feature-extraction''': LevitModel, '''image-classification''': (LevitForImageClassification, LevitForImageClassificationWithTeacher), } if is_torch_available() else {} ) a__ =False a__ =False a__ =False a__ =False a__ =False def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : Optional[int] = LevitModelTester(self ) _UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , has_text_modality=A , hidden_size=3_7 ) def __lowerCAmelCase ( self ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __lowerCAmelCase ( self ) -> List[Any]: return @unittest.skip(reason='''Levit does not use inputs_embeds''' ) def __lowerCAmelCase ( self ) -> Any: pass @unittest.skip(reason='''Levit does not support input and output embeddings''' ) def __lowerCAmelCase ( self ) -> int: pass @unittest.skip(reason='''Levit does not output attentions''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : int = model_class(A ) _UpperCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase : str = [*signature.parameters.keys()] _UpperCAmelCase : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , A ) def __lowerCAmelCase ( self ) -> Optional[Any]: def check_hidden_states_output(A , A , A ): _UpperCAmelCase : Optional[Any] = model_class(A ) model.to(A ) model.eval() with torch.no_grad(): _UpperCAmelCase : int = model(**self._prepare_for_class(A , A ) ) _UpperCAmelCase : Optional[int] = outputs.hidden_states _UpperCAmelCase : Any = len(self.model_tester.depths ) + 1 
self.assertEqual(len(A ) , A ) _UpperCAmelCase : int = (self.model_tester.image_size, self.model_tester.image_size) _UpperCAmelCase , _UpperCAmelCase : Any = image_size[0], image_size[1] for _ in range(4 ): _UpperCAmelCase : Optional[Any] = floor( ( (height + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) _UpperCAmelCase : int = floor( ( (width + 2 * self.model_tester.padding - self.model_tester.kernel_size) / self.model_tester.stride ) + 1 ) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [ height * width, self.model_tester.hidden_sizes[0], ] , ) _UpperCAmelCase , _UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase : Union[str, Any] = True check_hidden_states_output(A , A , A ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _UpperCAmelCase : str = True check_hidden_states_output(A , A , A ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCAmelCase ( self ) -> Optional[int]: pass def __lowerCAmelCase ( self , A , A , A=False ) -> Any: _UpperCAmelCase : Optional[int] = super()._prepare_for_class(A , A , return_labels=A ) if return_labels: if model_class.__name__ == "LevitForImageClassificationWithTeacher": del inputs_dict["labels"] return inputs_dict def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*A ) def __lowerCAmelCase ( self ) -> List[Any]: if not self.model_tester.is_training: return _UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Union[str, Any] = True for model_class in self.all_model_classes: # LevitForImageClassificationWithTeacher supports inference-only if ( model_class in get_values(A ) or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue _UpperCAmelCase : str = model_class(A ) model.to(A ) model.train() _UpperCAmelCase : List[Any] = self._prepare_for_class(A , A , return_labels=A ) _UpperCAmelCase : Any = model(**A ).loss loss.backward() def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase , _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return _UpperCAmelCase : Dict = False _UpperCAmelCase : Optional[Any] = True for model_class in self.all_model_classes: if model_class in get_values(A ) or not model_class.supports_gradient_checkpointing: continue # LevitForImageClassificationWithTeacher supports inference-only if model_class.__name__ == "LevitForImageClassificationWithTeacher": continue _UpperCAmelCase : List[Any] = model_class(A ) model.gradient_checkpointing_enable() model.to(A ) model.train() _UpperCAmelCase : List[str] = self._prepare_for_class(A , A , return_labels=A ) _UpperCAmelCase : int = model(**A ).loss loss.backward() def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() _UpperCAmelCase : Any = [ {'''title''': '''multi_label_classification''', '''num_labels''': 2, '''dtype''': torch.float}, 
{'''title''': '''single_label_classification''', '''num_labels''': 1, '''dtype''': torch.long}, {'''title''': '''regression''', '''num_labels''': 1, '''dtype''': torch.float}, ] for model_class in self.all_model_classes: if ( model_class not in [ *get_values(A ), ] or model_class.__name__ == "LevitForImageClassificationWithTeacher" ): continue for problem_type in problem_types: with self.subTest(msg=f'Testing {model_class} with {problem_type["title"]}' ): _UpperCAmelCase : Tuple = problem_type['''title'''] _UpperCAmelCase : Union[str, Any] = problem_type['''num_labels'''] _UpperCAmelCase : Union[str, Any] = model_class(A ) model.to(A ) model.train() _UpperCAmelCase : List[Any] = self._prepare_for_class(A , A , return_labels=A ) if problem_type["num_labels"] > 1: _UpperCAmelCase : Any = inputs['''labels'''].unsqueeze(1 ).repeat(1 , problem_type['''num_labels'''] ) _UpperCAmelCase : Optional[int] = inputs['''labels'''].to(problem_type['''dtype'''] ) # This tests that we do not trigger the warning form PyTorch "Using a target size that is different # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure # they have the same size." which is a symptom something in wrong for the regression problem. # See https://github.com/huggingface/transformers/issues/11780 with warnings.catch_warnings(record=A ) as warning_list: _UpperCAmelCase : Optional[int] = model(**A ).loss for w in warning_list: if "Using a target size that is different to the input size" in str(w.message ): raise ValueError( f'Something is going wrong in the regression problem: intercepted {w.message}' ) loss.backward() @slow def __lowerCAmelCase ( self ) -> Any: for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _UpperCAmelCase : List[Any] = LevitModel.from_pretrained(A ) self.assertIsNotNone(A ) def lowerCamelCase_ (): _UpperCAmelCase : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) return image @require_torch @require_vision class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @cached_property def __lowerCAmelCase ( self ) -> Any: return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) @slow def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : List[Any] = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to( A ) _UpperCAmelCase : str = self.default_image_processor _UpperCAmelCase : Dict = prepare_img() _UpperCAmelCase : int = image_processor(images=A , return_tensors='''pt''' ).to(A ) # forward pass with torch.no_grad(): _UpperCAmelCase : List[str] = model(**A ) # verify the logits _UpperCAmelCase : Optional[int] = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , A ) _UpperCAmelCase : List[str] = torch.tensor([1.0_448, -0.3_745, -1.8_317] ).to(A ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _lowerCAmelCase :Tuple = tuple[int, int] class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A ) -> None: _UpperCAmelCase : set[int] = vertices _UpperCAmelCase : dict[EdgeT, int] = { (min(A ), max(A )): weight for edge, weight in edges.items() } def __lowerCAmelCase ( self , A , A ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _UpperCAmelCase : List[Any] = weight def __lowerCAmelCase ( self ) -> Graph: _UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} ) _UpperCAmelCase : EdgeT _UpperCAmelCase : int _UpperCAmelCase : EdgeT _UpperCAmelCase : int while len(subgraph.vertices ) < len(self.vertices ): _UpperCAmelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _UpperCAmelCase : Tuple = edge _UpperCAmelCase : Optional[int] = weight subgraph.add_edge(A , A ) return subgraph def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ): _UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : dict[EdgeT, int] = {} _UpperCAmelCase : list[str] _UpperCAmelCase : int _UpperCAmelCase : int with open(UpperCamelCase__ ) as f: _UpperCAmelCase : str = f.read().strip().split('''\n''' ) _UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data] for edgea in range(1 , len(UpperCamelCase__ ) ): for edgea in range(UpperCamelCase__ ): if adjaceny_matrix[edgea][edgea] != "-": _UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] ) _UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ ) _UpperCAmelCase : Graph = graph.prims_algorithm() _UpperCAmelCase : int = sum(graph.edges.values() ) _UpperCAmelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self , A , A=7 , A=3 , A=3_0 , A=4_0_0 , A=True , A=None , A=0.9 , A=None , A=True , A=[0.5, 0.5, 0.5] , A=[0.5, 0.5, 0.5] , ) -> Tuple: _UpperCAmelCase : Dict = size if size is not None else {'''shortest_edge''': 3_0} _UpperCAmelCase : Tuple = crop_size if crop_size is not None else {'''height''': 3_0, '''width''': 3_0} _UpperCAmelCase : Optional[Any] = parent _UpperCAmelCase : Union[str, Any] = batch_size _UpperCAmelCase : str = num_channels _UpperCAmelCase : Tuple = min_resolution _UpperCAmelCase : Optional[Any] = max_resolution _UpperCAmelCase : Union[str, Any] = do_resize_and_center_crop _UpperCAmelCase : Any = size _UpperCAmelCase : Optional[int] = crop_pct _UpperCAmelCase : int = crop_size _UpperCAmelCase : Optional[int] = do_normalize _UpperCAmelCase : List[str] = image_mean _UpperCAmelCase : Tuple = image_std def __lowerCAmelCase ( self ) -> int: return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class _UpperCAmelCase ( a ,unittest.TestCase ): '''simple docstring''' a__ =PoolFormerImageProcessor if is_vision_available() else None def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : List[str] = PoolFormerImageProcessingTester(self ) @property def __lowerCAmelCase ( self ) -> str: return self.image_processor_tester.prepare_image_processor_dict() def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(A , '''do_resize_and_center_crop''' ) ) self.assertTrue(hasattr(A , '''size''' ) ) self.assertTrue(hasattr(A , '''crop_pct''' ) ) self.assertTrue(hasattr(A , '''do_normalize''' ) ) self.assertTrue(hasattr(A , '''image_mean''' ) ) self.assertTrue(hasattr(A , '''image_std''' ) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 3_0} ) self.assertEqual(image_processor.crop_size , {'''height''': 3_0, '''width''': 3_0} ) _UpperCAmelCase : int = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} ) self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass def __lowerCAmelCase ( self ) -> Optional[Any]: # Initialize image_processing _UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images _UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A ) for image in image_inputs: self.assertIsInstance(A , Image.Image ) # Test not batched input _UpperCAmelCase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' 
).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCAmelCase : Any = image_processing(A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ) -> Tuple: # Initialize image_processing _UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors _UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , numpify=A ) for image in image_inputs: self.assertIsInstance(A , np.ndarray ) # Test not batched input _UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCAmelCase : Optional[Any] = image_processing(A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def __lowerCAmelCase ( self ) -> List[Any]: # Initialize image_processing _UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors _UpperCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=A , torchify=A ) for image in image_inputs: self.assertIsInstance(A , torch.Tensor ) # Test not batched input _UpperCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched _UpperCAmelCase : Any = image_processing(A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :int = logging.get_logger(__name__) _lowerCAmelCase :Union[str, Any] = { 'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''mgp-str''' def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]: super().__init__(**A ) _UpperCAmelCase : Any = image_size _UpperCAmelCase : str = patch_size _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Dict = max_token_length _UpperCAmelCase : Optional[Any] = num_character_labels _UpperCAmelCase : int = num_bpe_labels _UpperCAmelCase : List[str] = num_wordpiece_labels _UpperCAmelCase : Optional[int] = hidden_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : List[Any] = mlp_ratio _UpperCAmelCase : List[str] = distilled _UpperCAmelCase : Optional[int] = layer_norm_eps _UpperCAmelCase : str = drop_rate _UpperCAmelCase : List[Any] = qkv_bias _UpperCAmelCase : List[str] = attn_drop_rate _UpperCAmelCase : Dict = drop_path_rate _UpperCAmelCase : Union[str, Any] = output_aa_attentions _UpperCAmelCase : List[str] = initializer_range
"""simple docstring""" _lowerCAmelCase :Tuple = [0, 2, 4, 6, 8] _lowerCAmelCase :str = [1, 3, 5, 7, 9] def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1 ): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 _UpperCAmelCase : Optional[int] = 0 for digit in range(10 ): _UpperCAmelCase : Tuple = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , UpperCamelCase__ , UpperCamelCase__ ) return result _UpperCAmelCase : Optional[Any] = 0 for digita in range(10 ): _UpperCAmelCase : Optional[Any] = digita if (remainder + digita) % 2 == 0: _UpperCAmelCase : List[Any] = ODD_DIGITS else: _UpperCAmelCase : List[str] = EVEN_DIGITS for digita in other_parity_digits: _UpperCAmelCase : Optional[Any] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , UpperCamelCase__ , UpperCamelCase__ , ) return result def lowerCamelCase_ (UpperCamelCase__ : int = 9 ): _UpperCAmelCase : Union[str, Any] = 0 for length in range(1 , max_power + 1 ): result += reversible_numbers(UpperCamelCase__ , 0 , [0] * length , UpperCamelCase__ ) return result if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" from __future__ import annotations import math def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ): if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(UpperCamelCase__ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) def lowerCamelCase_ (): _UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423] _UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" from ..utils import DummyObject, requires_backends class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Tuple: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Tuple: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Optional[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Tuple: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> List[Any]: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> List[Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> int: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Dict: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> List[str]: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Any: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Union[str, Any]: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Any: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Optional[int]: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> int: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ 
=['''flax'''] def __init__( self , *A , **A ) -> Dict: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> int: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> int: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Union[str, Any]: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> int: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Any: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Dict: requires_backends(cls , ['''flax'''] ) class _UpperCAmelCase ( metaclass=a ): '''simple docstring''' a__ =['''flax'''] def __init__( self , *A , **A ) -> Tuple: requires_backends(self , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Optional[int]: requires_backends(cls , ['''flax'''] ) @classmethod def __lowerCAmelCase ( cls , *A , **A ) -> Any: requires_backends(cls , ['''flax'''] )
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _lowerCAmelCase :Optional[Any] = False class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A ) _UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = generator.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = '''cyberpunk 2077''' _UpperCAmelCase : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.dual_guided( prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images _UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger ''' _UpperCAmelCase : Tuple = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.text_to_image( prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images _UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images _UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 
2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
"""simple docstring""" import json from typing import Iterator, List, Union from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers from tokenizers.implementations.base_tokenizer import BaseTokenizer from tokenizers.models import Unigram from tokenizers.processors import TemplateProcessing class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , A = "▁" , A = True , A = "<unk>" , A = "</s>" , A = "<pad>" , ) -> List[Any]: _UpperCAmelCase : List[Any] = { '''pad''': {'''id''': 0, '''token''': pad_token}, '''eos''': {'''id''': 1, '''token''': eos_token}, '''unk''': {'''id''': 2, '''token''': unk_token}, } _UpperCAmelCase : List[str] = [None] * len(self.special_tokens ) for token_dict in self.special_tokens.values(): _UpperCAmelCase : Tuple = token_dict['''token'''] _UpperCAmelCase : Union[str, Any] = Tokenizer(Unigram() ) _UpperCAmelCase : Optional[Any] = normalizers.Sequence( [ normalizers.Nmt(), normalizers.NFKC(), normalizers.Replace(Regex(''' {2,}''' ) , ''' ''' ), normalizers.Lowercase(), ] ) _UpperCAmelCase : Any = pre_tokenizers.Sequence( [ pre_tokenizers.Metaspace(replacement=A , add_prefix_space=A ), pre_tokenizers.Digits(individual_digits=A ), pre_tokenizers.Punctuation(), ] ) _UpperCAmelCase : Optional[int] = decoders.Metaspace(replacement=A , add_prefix_space=A ) _UpperCAmelCase : Optional[Any] = TemplateProcessing( single=f'$A {self.special_tokens["eos"]["token"]}' , special_tokens=[(self.special_tokens['''eos''']['''token'''], self.special_tokens['''eos''']['''id'''])] , ) _UpperCAmelCase : Optional[Any] = { '''model''': '''SentencePieceUnigram''', '''replacement''': replacement, '''add_prefix_space''': add_prefix_space, } super().__init__(A , A ) def __lowerCAmelCase ( self , A , A = 8_0_0_0 , A = True , ) -> Optional[int]: _UpperCAmelCase : Tuple = trainers.UnigramTrainer( vocab_size=A , special_tokens=self.special_tokens_list , show_progress=A , ) if isinstance(A , A ): _UpperCAmelCase : List[Any] = [files] self._tokenizer.train(A , trainer=A ) self.add_unk_id() def __lowerCAmelCase ( self , A , A = 8_0_0_0 , A = True , ) -> List[Any]: _UpperCAmelCase : Dict = trainers.UnigramTrainer( vocab_size=A , special_tokens=self.special_tokens_list , show_progress=A , ) self._tokenizer.train_from_iterator(A , trainer=A ) self.add_unk_id() def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Optional[int] = json.loads(self._tokenizer.to_str() ) _UpperCAmelCase : int = self.special_tokens['''unk''']['''id'''] _UpperCAmelCase : List[str] = Tokenizer.from_str(json.dumps(A ) )
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCAmelCase :Any = False @skip_mps class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ): '''simple docstring''' a__ =StableDiffusionAttendAndExcitePipeline a__ =False a__ =TEXT_TO_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) a__ =TEXT_TO_IMAGE_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __lowerCAmelCase ( cls ) -> List[str]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , ) _UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , ) torch.manual_seed(0 ) _UpperCAmelCase : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _UpperCAmelCase : List[str] = CLIPTextModel(A ) _UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCAmelCase : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: _UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : List[str] = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = '''cpu''' _UpperCAmelCase : Tuple = self.get_dummy_components() _UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Dict = self.get_dummy_inputs(A 
) _UpperCAmelCase : Union[str, Any] = pipe(**A ).images _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) _UpperCAmelCase : int = np.array( [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) _UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1E-3 ) def __lowerCAmelCase ( self ) -> Dict: super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> str: super().test_save_load_local(expected_max_difference=5E-4 ) def __lowerCAmelCase ( self ) -> Optional[int]: super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = torch.manual_seed(5_1 ) _UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses''' _UpperCAmelCase : int = [5, 7] _UpperCAmelCase : Dict = pipe( prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] _UpperCAmelCase : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging _lowerCAmelCase :Optional[Any] = logging.get_logger(__name__) _lowerCAmelCase :Optional[Any] = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''gptj''' a__ ={ '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , A=5_0_4_0_0 , A=2_0_4_8 , A=4_0_9_6 , A=2_8 , A=1_6 , A=6_4 , A=None , A="gelu_new" , A=0.0 , A=0.0 , A=0.0 , A=1E-5 , A=0.02 , A=True , A=5_0_2_5_6 , A=5_0_2_5_6 , A=False , **A , ) -> str: _UpperCAmelCase : Tuple = vocab_size _UpperCAmelCase : Optional[int] = n_positions _UpperCAmelCase : Optional[Any] = n_embd _UpperCAmelCase : Dict = n_layer _UpperCAmelCase : Union[str, Any] = n_head _UpperCAmelCase : Union[str, Any] = n_inner _UpperCAmelCase : int = rotary_dim _UpperCAmelCase : Tuple = activation_function _UpperCAmelCase : int = resid_pdrop _UpperCAmelCase : str = embd_pdrop _UpperCAmelCase : Optional[Any] = attn_pdrop _UpperCAmelCase : Any = layer_norm_epsilon _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : int = use_cache _UpperCAmelCase : Any = bos_token_id _UpperCAmelCase : str = eos_token_id super().__init__( bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A ) class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , A , A = "default" , A = None , A = False , ) -> Any: super().__init__(A , task=A , patching_specs=A , use_past=A ) if not getattr(self._config , '''pad_token_id''' , A ): # TODO: how to do that better? 
_UpperCAmelCase : List[str] = 0 @property def __lowerCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: _UpperCAmelCase : List[str] = OrderedDict({'''input_ids''': {0: '''batch''', 1: '''sequence'''}} ) if self.use_past: self.fill_with_past_key_values_(A , direction='''inputs''' ) _UpperCAmelCase : List[Any] = {0: '''batch''', 1: '''past_sequence + sequence'''} else: _UpperCAmelCase : Optional[int] = {0: '''batch''', 1: '''sequence'''} return common_inputs @property def __lowerCAmelCase ( self ) -> int: return self._config.n_layer @property def __lowerCAmelCase ( self ) -> int: return self._config.n_head def __lowerCAmelCase ( self , A , A = -1 , A = -1 , A = False , A = None , ) -> Mapping[str, Any]: _UpperCAmelCase : Optional[Any] = super(A , self ).generate_dummy_inputs( A , batch_size=A , seq_length=A , is_pair=A , framework=A ) # We need to order the input in the way they appears in the forward() _UpperCAmelCase : Optional[Any] = OrderedDict({'''input_ids''': common_inputs['''input_ids''']} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' ) else: import torch _UpperCAmelCase , _UpperCAmelCase : Optional[int] = common_inputs['''input_ids'''].shape # Not using the same length for past_key_values _UpperCAmelCase : Dict = seqlen + 2 _UpperCAmelCase : Tuple = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) _UpperCAmelCase : List[Any] = [ (torch.zeros(A ), torch.zeros(A )) for _ in range(self.num_layers ) ] _UpperCAmelCase : Optional[Any] = common_inputs['''attention_mask'''] if self.use_past: _UpperCAmelCase : Any = ordered_inputs['''attention_mask'''].dtype _UpperCAmelCase : Union[str, Any] = torch.cat( [ordered_inputs['''attention_mask'''], torch.ones(A , A , dtype=A )] , dim=1 ) return ordered_inputs @property def __lowerCAmelCase ( self ) -> int: return 1_3
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : List[str] = -1 _UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCAmelCase : str = TextStreamer(A ) model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCAmelCase : List[str] = cs.out[:-1] self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : List[Any] = -1 _UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] ) _UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A ) _UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A ) thread.start() _UpperCAmelCase : Any = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : Any = -1 _UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :] _UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A ) model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCAmelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Optional[int]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' ) _UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A ) _UpperCAmelCase : Tuple = -1 _UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A ) model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n" _UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : Dict = -1 _UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 ) _UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(A ): _UpperCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
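
# --- Added illustration (not part of the original tests above): a minimal,
# hedged sketch of the application-side pattern these tests exercise, where
# generation runs in a background thread while the main thread consumes
# decoded text chunks from the iterator streamer. The tiny checkpoint is the
# same placeholder used in the tests; variable names are illustrative only.
if __name__ == "__main__":
    demo_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    demo_model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
    demo_inputs = demo_tokenizer("Hello", return_tensors="pt")
    demo_streamer = TextIteratorStreamer(demo_tokenizer, skip_prompt=True)
    Thread(
        target=demo_model.generate,
        kwargs={**demo_inputs, "max_new_tokens": 10, "do_sample": False, "streamer": demo_streamer},
    ).start()
    for chunk in demo_streamer:  # blocks until the generation thread pushes the next decoded chunk
        print(chunk, end="", flush=True)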
"""simple docstring""" from __future__ import annotations _lowerCAmelCase :Union[str, Any] = { 'A': ['B', 'C', 'E'], 'B': ['A', 'D', 'E'], 'C': ['A', 'F', 'G'], 'D': ['B'], 'E': ['A', 'B', 'D'], 'F': ['C'], 'G': ['C'], } class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A ) -> None: _UpperCAmelCase : Union[str, Any] = graph # mapping node to its parent in resulting breadth first tree _UpperCAmelCase : dict[str, str | None] = {} _UpperCAmelCase : List[str] = source_vertex def __lowerCAmelCase ( self ) -> None: _UpperCAmelCase : Optional[int] = {self.source_vertex} _UpperCAmelCase : Optional[Any] = None _UpperCAmelCase : Optional[Any] = [self.source_vertex] # first in first out queue while queue: _UpperCAmelCase : int = queue.pop(0 ) for adjacent_vertex in self.graph[vertex]: if adjacent_vertex not in visited: visited.add(A ) _UpperCAmelCase : List[Any] = vertex queue.append(A ) def __lowerCAmelCase ( self , A ) -> str: if target_vertex == self.source_vertex: return self.source_vertex _UpperCAmelCase : Dict = self.parent.get(A ) if target_vertex_parent is None: _UpperCAmelCase : int = ( f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}' ) raise ValueError(A ) return self.shortest_path(A ) + f'->{target_vertex}' if __name__ == "__main__": _lowerCAmelCase :List[str] = Graph(graph, 'G') g.breath_first_search() print(g.shortest_path('D')) print(g.shortest_path('G')) print(g.shortest_path('Foo'))
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCamelCase_ (UpperCamelCase__ : float ): if num <= 0: raise ValueError('''math domain error''' ) return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0] def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ): return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" from itertools import permutations def lowerCamelCase_ (UpperCamelCase__ : tuple ): if num[3] % 2 != 0: return False if (num[2] + num[3] + num[4]) % 3 != 0: return False if num[5] % 5 != 0: return False _UpperCAmelCase : Optional[Any] = [7, 11, 13, 17] for i, test in enumerate(UpperCamelCase__ ): if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0: return False return True def lowerCamelCase_ (UpperCamelCase__ : int = 10 ): return sum( int(''''''.join(map(UpperCamelCase__ , UpperCamelCase__ ) ) ) for num in permutations(range(UpperCamelCase__ ) ) if is_substring_divisible(UpperCamelCase__ ) ) if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b" _UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:] _UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel _lowerCAmelCase :Optional[int] = logging.getLogger(__name__) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] ): # save results if os.path.exists(UpperCamelCase__ ): if os.path.exists(os.path.join(UpperCamelCase__ , '''config.json''' ) ) and os.path.isfile( os.path.join(UpperCamelCase__ , '''config.json''' ) ): os.remove(os.path.join(UpperCamelCase__ , '''config.json''' ) ) if os.path.exists(os.path.join(UpperCamelCase__ , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(UpperCamelCase__ , '''pytorch_model.bin''' ) ): os.remove(os.path.join(UpperCamelCase__ , '''pytorch_model.bin''' ) ) else: os.makedirs(UpperCamelCase__ ) model.save_pretrained(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : Dict=False ): _UpperCAmelCase : int = 2 if unlogit: _UpperCAmelCase : List[str] = torch.pow(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = p * torch.log(UpperCamelCase__ ) _UpperCAmelCase : List[str] = 0 return -plogp.sum(dim=-1 ) def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] ): logger.info('''lv, h >\t''' + '''\t'''.join(F'{x + 1}' for x in range(len(UpperCamelCase__ ) ) ) ) for row in range(len(UpperCamelCase__ ) ): if tensor.dtype != torch.long: logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:.5f}' for x in tensor[row].cpu().data ) ) else: logger.info(F'layer {row + 1}:\t' + '''\t'''.join(F'{x:d}' for x in tensor[row].cpu().data ) ) def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Any=False ): _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = model.config.num_hidden_layers, model.config.num_attention_heads _UpperCAmelCase : str = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) _UpperCAmelCase : Union[str, Any] = torch.zeros(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) if head_mask is None: _UpperCAmelCase : Optional[int] = torch.ones(UpperCamelCase__ , UpperCamelCase__ ).to(args.device ) head_mask.requires_grad_(requires_grad=UpperCamelCase__ ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: _UpperCAmelCase : Optional[Any] = None _UpperCAmelCase : str = 0.0 _UpperCAmelCase : Optional[Any] = 0.0 for step, inputs in enumerate(tqdm(UpperCamelCase__ , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): _UpperCAmelCase : Dict = tuple(t.to(args.device ) for t in inputs ) ((_UpperCAmelCase) , ) : List[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) _UpperCAmelCase : List[Any] = model(UpperCamelCase__ , labels=UpperCamelCase__ , head_mask=UpperCamelCase__ ) # (loss), lm_logits, presents, (all hidden_states), (attentions) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(UpperCamelCase__ ): 
_UpperCAmelCase : Optional[int] = entropy(attn.detach() , UpperCamelCase__ ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(UpperCamelCase__ ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: _UpperCAmelCase : Tuple = 2 _UpperCAmelCase : List[str] = torch.pow(torch.pow(UpperCamelCase__ , UpperCamelCase__ ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-2_0 if not args.dont_normalize_global_importance: _UpperCAmelCase : str = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(UpperCamelCase__ ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(UpperCamelCase__ ) logger.info('''Head ranked by importance scores''' ) _UpperCAmelCase : Any = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) _UpperCAmelCase : List[Any] = torch.arange( head_importance.numel() , device=args.device ) _UpperCAmelCase : str = head_ranks.view_as(UpperCamelCase__ ) print_ad_tensor(UpperCamelCase__ ) return attn_entropy, head_importance, total_loss def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ ) _UpperCAmelCase : List[str] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , UpperCamelCase__ , original_score * args.masking_threshold ) _UpperCAmelCase : Dict = torch.ones_like(UpperCamelCase__ ) _UpperCAmelCase : Any = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) _UpperCAmelCase : Optional[Any] = original_score while current_score >= original_score * args.masking_threshold: _UpperCAmelCase : Any = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads _UpperCAmelCase : Dict = float('''Inf''' ) _UpperCAmelCase : int = head_importance.view(-1 ).sort()[1] if len(UpperCamelCase__ ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads _UpperCAmelCase : Optional[Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) _UpperCAmelCase : Optional[int] = new_head_mask.view(-1 ) _UpperCAmelCase : Union[str, Any] = 0.0 _UpperCAmelCase : str = new_head_mask.view_as(UpperCamelCase__ ) _UpperCAmelCase : Dict = new_head_mask.clone().detach() print_ad_tensor(UpperCamelCase__ ) # Compute metric and head importance again _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , head_mask=UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , UpperCamelCase__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(UpperCamelCase__ ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , 
head_mask.detach().cpu().numpy() ) return head_mask def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] ): _UpperCAmelCase : Dict = datetime.now() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ ) _UpperCAmelCase : List[Any] = 1 / loss _UpperCAmelCase : Any = datetime.now() - before_time _UpperCAmelCase : str = sum(p.numel() for p in model.parameters() ) _UpperCAmelCase : Dict = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(UpperCamelCase__ ) ) } for k, v in heads_to_prune.items(): if isinstance(UpperCamelCase__ , UpperCamelCase__ ): _UpperCAmelCase : List[str] = [ v, ] assert sum(len(UpperCamelCase__ ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(UpperCamelCase__ ) _UpperCAmelCase : Dict = sum(p.numel() for p in model.parameters() ) _UpperCAmelCase : Dict = datetime.now() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = compute_heads_importance( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , compute_entropy=UpperCamelCase__ , compute_importance=UpperCamelCase__ , head_mask=UpperCamelCase__ , actually_pruned=UpperCamelCase__ , ) _UpperCAmelCase : Optional[Any] = 1 / loss _UpperCAmelCase : List[str] = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , UpperCamelCase__ , UpperCamelCase__ , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , UpperCamelCase__ , UpperCamelCase__ ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(UpperCamelCase__ , args.output_dir ) def lowerCamelCase_ (): _UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=UpperCamelCase__ , type=UpperCamelCase__ , required=UpperCamelCase__ , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=UpperCamelCase__ , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=UpperCamelCase__ , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=UpperCamelCase__ , type=UpperCamelCase__ , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=UpperCamelCase__ , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=UpperCamelCase__ , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=UpperCamelCase__ , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=UpperCamelCase__ , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=UpperCamelCase__ , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=UpperCamelCase__ , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=UpperCamelCase__ , default=42 ) parser.add_argument('''--local_rank''' , type=UpperCamelCase__ , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=UpperCamelCase__ , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=UpperCamelCase__ , default='''''' , help='''Can be used for distant debugging.''' ) _UpperCAmelCase : Union[str, Any] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=UpperCamelCase__ ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: _UpperCAmelCase : Dict = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) _UpperCAmelCase : Optional[Any] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) _UpperCAmelCase : Union[str, Any] = torch.device('''cuda''' , args.local_rank ) _UpperCAmelCase : List[str] = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) _UpperCAmelCase : Optional[Any] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: _UpperCAmelCase : Union[str, Any] = nn.parallel.DistributedDataParallel( UpperCamelCase__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=UpperCamelCase__ ) elif args.n_gpu > 1: _UpperCAmelCase : List[Any] = nn.DataParallel(UpperCamelCase__ ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , UpperCamelCase__ ) # Prepare dataset _UpperCAmelCase : List[str] = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) _UpperCAmelCase : Optional[int] = (torch.from_numpy(UpperCamelCase__ ),) _UpperCAmelCase : Any = TensorDataset(*UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = RandomSampler(UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = DataLoader(UpperCamelCase__ , sampler=UpperCamelCase__ , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: _UpperCAmelCase : Tuple = mask_heads(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) prune_heads(UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" def lowerCamelCase_ (): return [ a * b * (1000 - a - b) for a in range(1 , 999 ) for b in range(UpperCamelCase__ , 999 ) if (a * a + b * b == (1000 - a - b) ** 2) ][0] if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) _lowerCAmelCase :List[str] = '▁' _lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'} _lowerCAmelCase :List[Any] = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } _lowerCAmelCase :Tuple = { 'xlm-roberta-base': 512, 'xlm-roberta-large': 512, 'xlm-roberta-large-finetuned-conll02-dutch': 512, 'xlm-roberta-large-finetuned-conll02-spanish': 512, 'xlm-roberta-large-finetuned-conll03-english': 512, 'xlm-roberta-large-finetuned-conll03-german': 512, } class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =['''input_ids''', '''attention_mask'''] def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token _UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , ) _UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) _UpperCAmelCase : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCAmelCase : Any = 1 _UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset _UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Union[str, Any]: _UpperCAmelCase : Tuple = self.__dict__.copy() _UpperCAmelCase : List[str] = None _UpperCAmelCase : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , A ) -> Optional[int]: _UpperCAmelCase : Optional[int] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self , A , A = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase : Any = [self.cls_token_id] _UpperCAmelCase : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : Dict = [self.sep_token_id] _UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCAmelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , A ) -> List[str]: return self.sp_model.encode(A , out_type=A ) def __lowerCAmelCase ( self , A ) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCAmelCase : Any = self.sp_model.PieceToId(A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self , A ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self , A ) -> int: _UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip() return out_string def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: if not os.path.isdir(A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase : List[Any] = os.path.join( A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A ) elif not os.path.isfile(self.vocab_file ): with open(A , '''wb''' ) as fi: _UpperCAmelCase : str = self.sp_model.serialized_model_proto() fi.write(A ) return 
(out_vocab_file,)
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int ): if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) _UpperCAmelCase : str = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , *A , **A ) -> None: warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , A , ) super().__init__(*A , **A )
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## _lowerCAmelCase :Dict = 16 _lowerCAmelCase :Optional[int] = 32 def lowerCamelCase_ (UpperCamelCase__ : Accelerator , UpperCamelCase__ : int = 16 ): _UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''bert-base-cased''' ) _UpperCAmelCase : int = load_dataset('''glue''' , '''mrpc''' ) def tokenize_function(UpperCamelCase__ : List[str] ): # max_length=None => use the model max length (it's actually the default) _UpperCAmelCase : Optional[Any] = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _UpperCAmelCase : Optional[Any] = datasets.map( UpperCamelCase__ , batched=UpperCamelCase__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _UpperCAmelCase : List[Any] = tokenized_datasets.rename_column('''label''' , '''labels''' ) def collate_fn(UpperCamelCase__ : int ): # On TPU it's best to pad everything to the same length or training will be very slow. _UpperCAmelCase : Union[str, Any] = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _UpperCAmelCase : Dict = 16 elif accelerator.mixed_precision != "no": _UpperCAmelCase : Optional[int] = 8 else: _UpperCAmelCase : int = None return tokenizer.pad( UpperCamelCase__ , padding='''longest''' , max_length=UpperCamelCase__ , pad_to_multiple_of=UpperCamelCase__ , return_tensors='''pt''' , ) # Instantiate dataloaders. 
_UpperCAmelCase : str = DataLoader( tokenized_datasets['''train'''] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) _UpperCAmelCase : str = DataLoader( tokenized_datasets['''validation'''] , shuffle=UpperCamelCase__ , collate_fn=UpperCamelCase__ , batch_size=UpperCamelCase__ ) return train_dataloader, eval_dataloader # For testing only if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1": from accelerate.test_utils.training import mocked_dataloaders _lowerCAmelCase :int = mocked_dataloaders # noqa: F811 def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : str ): # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , UpperCamelCase__ ) == "1": _UpperCAmelCase : Dict = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: _UpperCAmelCase : str = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: _UpperCAmelCase : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _UpperCAmelCase : List[str] = config['''lr'''] _UpperCAmelCase : Optional[int] = int(config['''num_epochs'''] ) _UpperCAmelCase : List[str] = int(config['''seed'''] ) _UpperCAmelCase : Any = int(config['''batch_size'''] ) set_seed(UpperCamelCase__ ) _UpperCAmelCase , _UpperCAmelCase : int = get_dataloaders(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : int = evaluate.load('''glue''' , '''mrpc''' ) # If the batch size is too big we use gradient accumulation _UpperCAmelCase : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _UpperCAmelCase : str = batch_size // MAX_GPU_BATCH_SIZE _UpperCAmelCase : Union[str, Any] = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) _UpperCAmelCase : Dict = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=UpperCamelCase__ ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). _UpperCAmelCase : str = model.to(accelerator.device ) # Instantiate optimizer _UpperCAmelCase : Optional[Any] = AdamW(params=model.parameters() , lr=UpperCamelCase__ ) # Instantiate scheduler _UpperCAmelCase : Dict = get_linear_schedule_with_warmup( optimizer=UpperCamelCase__ , num_warmup_steps=100 , num_training_steps=(len(UpperCamelCase__ ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = accelerator.prepare( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # New Code # # We need to initialize the trackers we use. 
Overall configurations can also be stored if args.with_tracking: _UpperCAmelCase : Optional[int] = os.path.split(UpperCamelCase__ )[-1].split('''.''' )[0] accelerator.init_trackers(UpperCamelCase__ , UpperCamelCase__ ) # Now we train the model for epoch in range(UpperCamelCase__ ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: _UpperCAmelCase : Optional[int] = 0 for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _UpperCAmelCase : List[str] = model(**UpperCamelCase__ ) _UpperCAmelCase : List[Any] = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() _UpperCAmelCase : List[str] = loss / gradient_accumulation_steps accelerator.backward(UpperCamelCase__ ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(UpperCamelCase__ ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): _UpperCAmelCase : Optional[Any] = model(**UpperCamelCase__ ) _UpperCAmelCase : int = outputs.logits.argmax(dim=-1 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) ) metric.add_batch( predictions=UpperCamelCase__ , references=UpperCamelCase__ , ) _UpperCAmelCase : Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'epoch {epoch}:' , UpperCamelCase__ ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { '''accuracy''': eval_metric['''accuracy'''], '''f1''': eval_metric['''f1'''], '''train_loss''': total_loss.item() / len(UpperCamelCase__ ), '''epoch''': epoch, } , step=UpperCamelCase__ , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowerCamelCase_ (): _UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument( '''--mixed_precision''' , type=UpperCamelCase__ , default=UpperCamelCase__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=UpperCamelCase__ , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) _UpperCAmelCase : Tuple = parser.parse_args() _UpperCAmelCase : Optional[int] = {'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16} training_function(UpperCamelCase__ , UpperCamelCase__ ) if __name__ == "__main__": main()
"""simple docstring""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ): # Load configuration defined in the metadata file with open(UpperCamelCase__ ) as metadata_file: _UpperCAmelCase : Dict = json.load(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' ) # Load the entity vocab file _UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens _UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.' _UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name] _UpperCAmelCase : Tuple = state_dict[prefix + matrix_name] _UpperCAmelCase : str = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']] _UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval() _UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. 
Expected only missing embeddings.position_ids' ) if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )): raise ValueError( '''Unexpected keys''' F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' ) # Check outputs _UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' ) _UpperCAmelCase : List[str] = ( '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the''' ''' new world number one avoid a humiliating second- round exit at Wimbledon .''' ) _UpperCAmelCase : Dict = (39, 42) _UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' ) _UpperCAmelCase : List[Any] = model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": _UpperCAmelCase : str = torch.Size((1, 42, 1024) ) _UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base _UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) ) _UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": _UpperCAmelCase : int = torch.Size((1, 1, 1024) ) _UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base _UpperCAmelCase : List[str] = torch.Size((1, 1, 768) ) _UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape != expected_shape): raise ValueError( F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' F' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ): _UpperCAmelCase : Any = {} with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(UpperCamelCase__ ): _UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' ) _UpperCAmelCase : Tuple = index return entity_vocab if __name__ == "__main__": _lowerCAmelCase :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' 
) _lowerCAmelCase :Any = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
"""simple docstring""" import os import shutil import tempfile import unittest import numpy as np from transformers import AutoTokenizer, BarkProcessor from transformers.testing_utils import require_torch, slow @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Any = '''ylacombe/bark-small''' _UpperCAmelCase : Any = tempfile.mkdtemp() _UpperCAmelCase : Union[str, Any] = '''en_speaker_1''' _UpperCAmelCase : Optional[Any] = '''This is a test string''' _UpperCAmelCase : List[str] = '''speaker_embeddings_path.json''' _UpperCAmelCase : Optional[int] = '''speaker_embeddings''' def __lowerCAmelCase ( self , **A ) -> str: return AutoTokenizer.from_pretrained(self.checkpoint , **A ) def __lowerCAmelCase ( self ) -> List[str]: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : str = self.get_tokenizer() _UpperCAmelCase : Tuple = BarkProcessor(tokenizer=A ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : Union[str, Any] = BarkProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) @slow def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Optional[Any] = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) processor.save_pretrained( self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , ) _UpperCAmelCase : Any = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCAmelCase : Any = BarkProcessor.from_pretrained( self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Dict = BarkProcessor.from_pretrained( pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , ) _UpperCAmelCase : Dict = 3_5 _UpperCAmelCase : Any = 2 _UpperCAmelCase : Any = 8 _UpperCAmelCase : Any = { '''semantic_prompt''': np.ones(A ), '''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ), '''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ), } # test providing already loaded voice_preset _UpperCAmelCase : Optional[Any] = processor(text=self.input_string , voice_preset=A ) _UpperCAmelCase : Optional[int] = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(A , np.array([] ) ).tolist() ) # test loading voice preset from npz file _UpperCAmelCase : str = os.path.join(self.tmpdirname , '''file.npz''' ) np.savez(A , **A ) _UpperCAmelCase : Optional[Any] = processor(text=self.input_string , voice_preset=A ) _UpperCAmelCase : str = inputs['''history_prompt'''] for key in voice_preset: self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(A , np.array([] ) ).tolist() ) # test loading voice preset from the hub _UpperCAmelCase : Optional[int] = processor(text=self.input_string , voice_preset=self.voice_preset ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : List[str] = self.get_tokenizer() _UpperCAmelCase : Optional[Any] = BarkProcessor(tokenizer=A ) _UpperCAmelCase : str = processor(text=self.input_string ) _UpperCAmelCase : Tuple = tokenizer( 
self.input_string , padding='''max_length''' , max_length=2_5_6 , add_special_tokens=A , return_attention_mask=A , return_token_type_ids=A , ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _lowerCAmelCase :str = object() # For specifying empty leaf dict `{}` _lowerCAmelCase :str = object() def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ): _UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): _UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def lowerCamelCase_ (UpperCamelCase__ : List[str] ): def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def lowerCamelCase_ (): return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : List[str] = _get_partition_rules() _UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} _UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar _lowerCAmelCase :str = TypeVar('_T') class _UpperCAmelCase ( Generic[_T] ): '''simple docstring''' def __init__( self , A = None ) -> None: _UpperCAmelCase : list[_T] = list(iterable or [] ) _UpperCAmelCase : list[_T] = [] def __len__( self ) -> int: return len(self._stacka ) + len(self._stacka ) def __repr__( self ) -> str: return f'Queue({tuple(self._stacka[::-1] + self._stacka )})' def __lowerCAmelCase ( self , A ) -> None: self._stacka.append(A ) def __lowerCAmelCase ( self ) -> _T: _UpperCAmelCase : List[Any] = self._stacka.pop _UpperCAmelCase : Optional[Any] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop() ) if not self._stacka: raise IndexError('''Queue is empty''' ) return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : str = pipeline( task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' ) _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @slow @require_torch def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Union[str, Any] = pipeline( task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , ) # This is an audio of a dog _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ] , ) _UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) _UpperCAmelCase : Tuple = audio_classifier( [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> int: pass
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self ) -> Any: super().__init__() _UpperCAmelCase : Tuple = nn.Linear(3 , 4 ) _UpperCAmelCase : Optional[Any] = nn.BatchNormad(4 ) _UpperCAmelCase : Dict = nn.Linear(4 , 5 ) def __lowerCAmelCase ( self , A ) -> Tuple: return self.lineara(self.batchnorm(self.lineara(A ) ) ) class _UpperCAmelCase ( a ): '''simple docstring''' def __lowerCAmelCase ( self , A , *A , **A ) -> Dict: return (args[0] + 1,) + args[1:], kwargs class _UpperCAmelCase ( a ): '''simple docstring''' def __lowerCAmelCase ( self , A , A ) -> Dict: return output + 1 class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = ModelForTest() _UpperCAmelCase : Optional[int] = ModelHook() add_hook_to_module(A , A ) self.assertEqual(test_model._hf_hook , A ) self.assertTrue(hasattr(A , '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] ) remove_hook_from_module(A ) self.assertFalse(hasattr(A , '''_hf_hook''' ) ) self.assertFalse(hasattr(A , '''_old_forward''' ) ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Optional[int] = ModelForTest() _UpperCAmelCase : Optional[int] = ModelHook() add_hook_to_module(A , A ) add_hook_to_module(A , A , append=A ) self.assertEqual(isinstance(test_model._hf_hook , A ) , A ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(A , '''_old_forward''' ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , '''forward''' ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ['''x'''] ) remove_hook_from_module(A ) self.assertFalse(hasattr(A , '''_hf_hook''' ) ) self.assertFalse(hasattr(A , '''_old_forward''' ) ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Tuple = ModelForTest() _UpperCAmelCase : Optional[Any] = torch.randn(2 , 3 ) _UpperCAmelCase : Optional[int] = test_model(x + 1 ) _UpperCAmelCase : str = test_model(x + 2 ) _UpperCAmelCase : List[Any] = PreForwardHook() add_hook_to_module(A , A ) _UpperCAmelCase : Tuple = test_model(A ) self.assertTrue(torch.allclose(A , A , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _UpperCAmelCase : Optional[int] = PreForwardHook() add_hook_to_module(A , A ) _UpperCAmelCase : int = test_model(A ) self.assertTrue(torch.allclose(A , A , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _UpperCAmelCase : Dict = SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(A , A ) _UpperCAmelCase : int = test_model(A ) assert torch.allclose(A , A , atol=1E-5 ) def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Optional[int] = ModelForTest() _UpperCAmelCase : Dict = torch.randn(2 , 3 ) _UpperCAmelCase : Union[str, Any] = test_model(A ) _UpperCAmelCase : Any = PostForwardHook() add_hook_to_module(A , A ) _UpperCAmelCase : str = test_model(A ) self.assertTrue(torch.allclose(A , 
output + 1 , atol=1E-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain _UpperCAmelCase : List[Any] = PostForwardHook() add_hook_to_module(A , A ) _UpperCAmelCase : Dict = test_model(A ) self.assertTrue(torch.allclose(A , output + 1 , atol=1E-5 ) ) # You need to use the sequential hook to chain two or more hooks _UpperCAmelCase : Optional[int] = SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(A , A ) _UpperCAmelCase : Union[str, Any] = test_model(A ) assert torch.allclose(A , output + 2 , atol=1E-5 ) def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Optional[Any] = ModelForTest() _UpperCAmelCase : Optional[int] = torch.randn(2 , 3 ) _UpperCAmelCase : Union[str, Any] = test_model(A ) _UpperCAmelCase : Optional[Any] = PostForwardHook() add_hook_to_module(A , A ) _UpperCAmelCase : int = test_model(A ) self.assertTrue(torch.allclose(A , output + 1 ) ) self.assertTrue(outputa.requires_grad ) _UpperCAmelCase : Union[str, Any] = True _UpperCAmelCase : Union[str, Any] = test_model(A ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device _UpperCAmelCase : Union[str, Any] = torch.randn(2 , 3 ) _UpperCAmelCase : List[str] = model(A ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(A , AlignDevicesHook(io_same_device=A ) ) _UpperCAmelCase : Dict = torch.randn(2 , 3 ).to(0 ) _UpperCAmelCase : Tuple = model(A ) self.assertEqual(output.device , torch.device(0 ) ) def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices _UpperCAmelCase : Dict = {'''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True} add_hook_to_module(model.lineara , AlignDevicesHook(**A ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**A ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device _UpperCAmelCase : Tuple = torch.device(hook_kwargs['''execution_device'''] ) self.assertEqual(model.batchnorm.running_mean.device , A ) _UpperCAmelCase : Optional[Any] = torch.randn(2 , 3 ) _UpperCAmelCase : Dict = model(A ) self.assertEqual(output.device , A ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload _UpperCAmelCase : int = { '''execution_device''': 0 if torch.cuda.is_available() else '''cpu''', '''offload''': True, '''offload_buffers''': True, } add_hook_to_module(model.lineara , AlignDevicesHook(**A ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**A ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**A ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) _UpperCAmelCase : Optional[int] = torch.randn(2 , 3 ) _UpperCAmelCase : Optional[int] = model(A ) self.assertEqual(output.device , A ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : List[str] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices _UpperCAmelCase : Any = 0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook(A , execution_device=A , offload=A ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device _UpperCAmelCase : str = torch.device(A ) self.assertEqual(model.batchnorm.running_mean.device , A ) _UpperCAmelCase : str = torch.randn(2 , 3 ) _UpperCAmelCase : Tuple = model(A ) self.assertEqual(output.device , A ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(A ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook(A , execution_device=A , offload=A , offload_buffers=A ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) _UpperCAmelCase : str = torch.randn(2 , 3 ) _UpperCAmelCase : Tuple = model(A ) self.assertEqual(output.device , A ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(A ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Union[str, Any] = ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # This will move each submodule on different devices _UpperCAmelCase : Optional[Any] = 0 if torch.cuda.is_available() else '''cpu''' attach_align_device_hook( A , execution_device=A , offload=A , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) # Buffers are not included in the offload by default, so are on the execution device _UpperCAmelCase : Any = torch.device(A ) self.assertEqual(model.batchnorm.running_mean.device , A ) _UpperCAmelCase : Any = torch.randn(2 , 3 ) _UpperCAmelCase : Optional[int] = model(A ) self.assertEqual(output.device , A ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(A ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) # Now test with buffers included in the offload attach_align_device_hook( A , execution_device=A , offload=A , weights_map=model.state_dict() , offload_buffers=A , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''meta''' ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device('''meta''' ) ) _UpperCAmelCase : Tuple = torch.randn(2 , 3 ) _UpperCAmelCase : Dict = model(A ) self.assertEqual(output.device , A ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(A ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.batchnorm.weight.device , torch.device('''cpu''' ) ) self.assertEqual(model.lineara.weight.device , torch.device('''cpu''' ) )
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _lowerCAmelCase :Tuple = logging.getLogger(__name__) def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ): def get_dataset(UpperCamelCase__ : List[str] ): _UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) _UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ ) _UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 ) _UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ): _UpperCAmelCase : Tuple = [] for epoch in range(UpperCamelCase__ ): # Train quickly model.train() for batch in dataloader: _UpperCAmelCase , _UpperCAmelCase : Dict = batch _UpperCAmelCase : int = model(UpperCamelCase__ ) _UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ ) accelerator.backward(UpperCamelCase__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self ) -> List[Any]: super().__init__() _UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) ) _UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) ) def __lowerCAmelCase ( self , A ) -> Tuple: return x * self.a + self.b class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : int = DummyModel() _UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders() _UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( A , A , A , A ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __lowerCAmelCase ( self ) -> List[str]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : Optional[Any] = DummyModel() _UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders() # Train baseline _UpperCAmelCase : Optional[int] = Accelerator() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : 
List[str] = accelerator.prepare( A , A , A , A ) # Save initial _UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' ) accelerator.save_state(A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item() _UpperCAmelCase : str = optimizer.state_dict() _UpperCAmelCase : Tuple = train(3 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : List[Any] = optimizer.state_dict() # Train partially set_seed(4_2 ) _UpperCAmelCase : Dict = DummyModel() _UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders() _UpperCAmelCase : Tuple = Accelerator() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare( A , A , A , A ) accelerator.load_state(A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : List[str] = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) _UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A ) # Save everything _UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' ) accelerator.save_state(A ) # Load everything back in and make sure all states work accelerator.load_state(A ) test_rands += train(1 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : Dict = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : List[Any] = DummyModel() _UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare( A , A , A , A ) # Save initial accelerator.save_state() ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : Dict = optimizer.state_dict() _UpperCAmelCase : int = train(3 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : Union[str, Any] = optimizer.state_dict() # Train partially set_seed(4_2 ) _UpperCAmelCase : List[Any] = DummyModel() _UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A ) _UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( A , A , A , A ) accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : str = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) _UpperCAmelCase : List[str] = train(2 , A , A , A , A ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work 
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item() _UpperCAmelCase : Tuple = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] ) _UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] ) _UpperCAmelCase : Optional[int] = DummyModel() _UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() ) _UpperCAmelCase : Optional[int] = Accelerator() with self.assertRaises(A ) as ve: accelerator.register_for_checkpointing(A , A , A , A ) _UpperCAmelCase : Dict = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : Tuple = DummyModel() _UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 ) _UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare( A , A , A , A , A ) # Save initial accelerator.save_state() _UpperCAmelCase : List[str] = scheduler.state_dict() train(3 , A , A , A , A , A ) self.assertNotEqual(A , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(A , scheduler.state_dict() ) def __lowerCAmelCase ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : int = DummyModel() _UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 ) # Train baseline _UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase : Optional[Any] = accelerator.prepare(A ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(A , env=os.environ.copy() ) if __name__ == "__main__": _lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing' _lowerCAmelCase :Any = DummyModel() _lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3) _lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders() _lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, 
project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: _lowerCAmelCase :int = group['params'][0].device break assert param_device.type == accelerator.device.type _lowerCAmelCase :Dict = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: _lowerCAmelCase :List[Any] = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: _lowerCAmelCase :Union[str, Any] = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
"""simple docstring""" import unittest import torch from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel from diffusers.training_utils import set_seed from diffusers.utils.testing_utils import slow _lowerCAmelCase :Any = False class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self , A=3_2 ) -> List[Any]: set_seed(0 ) _UpperCAmelCase : Any = UNetaDModel(sample_size=A , in_channels=3 , out_channels=3 ) _UpperCAmelCase : str = torch.optim.SGD(model.parameters() , lr=0.0_001 ) return model, optimizer @slow def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : int = '''cpu''' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable _UpperCAmelCase : Optional[Any] = DDPMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=A , ) _UpperCAmelCase : List[str] = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_start=0.0_001 , beta_end=0.02 , beta_schedule='''linear''' , clip_sample=A , ) assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps # shared batches for DDPM and DDIM set_seed(0 ) _UpperCAmelCase : Tuple = [torch.randn((4, 3, 3_2, 3_2) ).clip(-1 , 1 ).to(A ) for _ in range(4 )] _UpperCAmelCase : str = [torch.randn((4, 3, 3_2, 3_2) ).to(A ) for _ in range(4 )] _UpperCAmelCase : int = [torch.randint(0 , 1_0_0_0 , (4,) ).long().to(A ) for _ in range(4 )] # train with a DDPM scheduler _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.get_model_optimizer(resolution=3_2 ) model.train().to(A ) for i in range(4 ): optimizer.zero_grad() _UpperCAmelCase : Any = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) _UpperCAmelCase : int = model(A , timesteps[i] ).sample _UpperCAmelCase : Union[str, Any] = torch.nn.functional.mse_loss(A , noise[i] ) loss.backward() optimizer.step() del model, optimizer # recreate the model and optimizer, and retry with DDIM _UpperCAmelCase , _UpperCAmelCase : List[str] = self.get_model_optimizer(resolution=3_2 ) model.train().to(A ) for i in range(4 ): optimizer.zero_grad() _UpperCAmelCase : Union[str, Any] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] ) _UpperCAmelCase : Tuple = model(A , timesteps[i] ).sample _UpperCAmelCase : int = torch.nn.functional.mse_loss(A , noise[i] ) loss.backward() optimizer.step() del model, optimizer self.assertTrue(torch.allclose(A , A , atol=1E-5 ) ) self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : bytes ): return "".join([hex(UpperCamelCase__ )[2:].zfill(2 ).upper() for byte in list(UpperCamelCase__ )] ) def lowerCamelCase_ (UpperCamelCase__ : str ): # Check data validity, following RFC3548 # https://www.ietf.org/rfc/rfc3548.txt if (len(UpperCamelCase__ ) % 2) != 0: raise ValueError( '''Base16 encoded data is invalid: Data does not have an even number of hex digits.''' ) # Check the character set - the standard base16 alphabet # is uppercase according to RFC3548 section 6 if not set(UpperCamelCase__ ) <= set('''0123456789ABCDEF''' ): raise ValueError( '''Base16 encoded data is invalid: Data is not uppercase hex or it contains invalid characters.''' ) # For every two hexadecimal digits (= a byte), turn it into an integer. # Then, string the result together into bytes, and return it. return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(UpperCamelCase__ ) , 2 ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase_ (UpperCamelCase__ : List[Any] ): _UpperCAmelCase : int = {} _UpperCAmelCase : Tuple = tokenizer(example['''content'''] , truncation=UpperCamelCase__ )['''input_ids'''] _UpperCAmelCase : Dict = len(example['''content'''] ) / len(output['''input_ids'''] ) return output _lowerCAmelCase :Tuple = HfArgumentParser(PretokenizationArguments) _lowerCAmelCase :Tuple = parser.parse_args() if args.num_workers is None: _lowerCAmelCase :Optional[Any] = multiprocessing.cpu_count() _lowerCAmelCase :List[str] = AutoTokenizer.from_pretrained(args.tokenizer_dir) _lowerCAmelCase :Dict = time.time() _lowerCAmelCase :int = load_dataset(args.dataset_name, split='train') print(f"Dataset loaded in {time.time()-t_start:.2f}s") _lowerCAmelCase :Tuple = time.time() _lowerCAmelCase :Tuple = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ 'repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated', ], ) print(f"Dataset tokenized in {time.time()-t_start:.2f}s") _lowerCAmelCase :Union[str, Any] = time.time() ds.push_to_hub(args.tokenized_data_repo) print(f"Data pushed to the hub in {time.time()-t_start:.2f}s")
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =IFImgaImgSuperResolutionPipeline a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) a__ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ) -> List[str]: return self._get_superresolution_dummy_components() def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Any = torch.manual_seed(A ) else: _UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : List[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class _UpperCAmelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__( self , A , A , A = None , A = None ) -> str: super().__init__() _UpperCAmelCase : Any = pad_token_id _UpperCAmelCase : Dict = max_length _UpperCAmelCase : Union[str, Any] = vocab _UpperCAmelCase : Optional[int] = merges _UpperCAmelCase : Optional[Any] = BytePairTokenizer(A , A , sequence_length=A ) @classmethod def __lowerCAmelCase ( cls , A , *A , **A ) -> Any: _UpperCAmelCase : Union[str, Any] = [''' '''.join(A ) for m in tokenizer.bpe_ranks.keys()] _UpperCAmelCase : List[Any] = tokenizer.get_vocab() return cls(A , A , *A , **A ) @classmethod def __lowerCAmelCase ( cls , A , *A , **A ) -> Union[str, Any]: _UpperCAmelCase : int = GPTaTokenizer.from_pretrained(A , *A , **A ) return cls.from_tokenizer(A , *A , **A ) @classmethod def __lowerCAmelCase ( cls , A ) -> Dict: return cls(**A ) def __lowerCAmelCase ( self ) -> Any: return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def __lowerCAmelCase ( self , A , A = None ) -> Union[str, Any]: _UpperCAmelCase : int = self.tf_tokenizer(A ) _UpperCAmelCase : str = tf.ones_like(A ) if self.pad_token_id is not None: # pad the tokens up to max length _UpperCAmelCase : str = max_length if max_length is not None else self.max_length if max_length is not None: _UpperCAmelCase , _UpperCAmelCase : Any = pad_model_inputs( A , max_seq_length=A , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int ): if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) _UpperCAmelCase : str = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import argparse import json from tqdm import tqdm def lowerCamelCase_ (): _UpperCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--src_path''' , type=UpperCamelCase__ , default='''biencoder-nq-dev.json''' , help='''Path to raw DPR training data''' , ) parser.add_argument( '''--evaluation_set''' , type=UpperCamelCase__ , help='''where to store parsed evaluation_set file''' , ) parser.add_argument( '''--gold_data_path''' , type=UpperCamelCase__ , help='''where to store parsed gold_data_path file''' , ) _UpperCAmelCase : List[str] = parser.parse_args() with open(args.src_path , '''r''' ) as src_file, open(args.evaluation_set , '''w''' ) as eval_file, open( args.gold_data_path , '''w''' ) as gold_file: _UpperCAmelCase : List[str] = json.load(UpperCamelCase__ ) for dpr_record in tqdm(UpperCamelCase__ ): _UpperCAmelCase : List[str] = dpr_record['''question'''] _UpperCAmelCase : str = [context['''title'''] for context in dpr_record['''positive_ctxs''']] eval_file.write(question + '''\n''' ) gold_file.write('''\t'''.join(UpperCamelCase__ ) + '''\n''' ) if __name__ == "__main__": main()
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ): _UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ ) _UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model'''] _UpperCAmelCase : Optional[Any] = list(state_dict.keys() ) # extract state_dict for VQVAE _UpperCAmelCase : Any = {} _UpperCAmelCase : Any = '''first_stage_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] # extract state_dict for UNetLDM _UpperCAmelCase : Tuple = {} _UpperCAmelCase : int = '''model.diffusion_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] _UpperCAmelCase : List[str] = config.model.params.first_stage_config.params _UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params _UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval() vqvae.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval() unet.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : int = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , ) _UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipeline.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) _lowerCAmelCase :List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
"""simple docstring""" import math def lowerCamelCase_ (UpperCamelCase__ : int ): _UpperCAmelCase : Dict = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : float = 1 / 1_2345 ): _UpperCAmelCase : Optional[Any] = 0 _UpperCAmelCase : int = 0 _UpperCAmelCase : int = 3 while True: _UpperCAmelCase : Optional[int] = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(UpperCamelCase__ ): _UpperCAmelCase : Any = int(UpperCamelCase__ ) total_partitions += 1 if check_partition_perfect(UpperCamelCase__ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(UpperCamelCase__ ) integer += 1 if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :List[str] = logging.get_logger(__name__) _lowerCAmelCase :Any = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''falcon''' a__ =['''past_key_values'''] def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any: _UpperCAmelCase : int = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A ) _UpperCAmelCase : int = hidden_size if n_embed is None else n_embed _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Optional[int] = layer_norm_epsilon _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : Optional[int] = use_cache _UpperCAmelCase : Any = hidden_dropout _UpperCAmelCase : Dict = attention_dropout _UpperCAmelCase : Any = bos_token_id _UpperCAmelCase : List[Any] = eos_token_id _UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase : Dict = alibi _UpperCAmelCase : Optional[int] = new_decoder_architecture _UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase : Optional[int] = parallel_attn _UpperCAmelCase : Optional[int] = bias super().__init__(bos_token_id=A , eos_token_id=A , **A ) @property def __lowerCAmelCase ( self ) -> List[str]: return self.hidden_size // self.num_attention_heads @property def __lowerCAmelCase ( self ) -> List[Any]: return not self.alibi
"""simple docstring""" from __future__ import annotations import math def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ): if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(UpperCamelCase__ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) def lowerCamelCase_ (): _UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423] _UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowerCAmelCase :int = ['small', 'medium', 'large'] _lowerCAmelCase :int = 'lm_head.decoder.weight' _lowerCAmelCase :Dict = 'lm_head.weight' def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ): _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ ) _UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": _lowerCAmelCase :Dict = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) _lowerCAmelCase :str = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") _lowerCAmelCase :int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :Dict = logging.get_logger(__name__) _lowerCAmelCase :Union[str, Any] = { 'microsoft/wavlm-base': 'https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json', # See all WavLM models at https://huggingface.co/models?filter=wavlm } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''wavlm''' def __init__( self , A=3_2 , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A="gelu" , A=0.1 , A=0.1 , A=0.1 , A=0.0 , A=0.1 , A=0.1 , A=0.02 , A=1E-5 , A="group" , A="gelu" , A=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , A=(5, 2, 2, 2, 2, 2, 2) , A=(1_0, 3, 3, 3, 3, 2, 2) , A=False , A=1_2_8 , A=1_6 , A=3_2_0 , A=8_0_0 , A=False , A=True , A=0.05 , A=1_0 , A=2 , A=0.0 , A=1_0 , A=3_2_0 , A=2 , A=0.1 , A=1_0_0 , A=2_5_6 , A=2_5_6 , A=0.1 , A="mean" , A=False , A=False , A=2_5_6 , A=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , A=(5, 3, 3, 1, 1) , A=(1, 2, 3, 1, 1) , A=5_1_2 , A=8_0 , A=0 , A=1 , A=2 , A=False , A=3 , A=2 , A=3 , A=None , **A , ) -> List[str]: super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A ) _UpperCAmelCase : List[Any] = hidden_size _UpperCAmelCase : Union[str, Any] = feat_extract_norm _UpperCAmelCase : Optional[int] = feat_extract_activation _UpperCAmelCase : Optional[int] = list(A ) _UpperCAmelCase : Tuple = list(A ) _UpperCAmelCase : Optional[Any] = list(A ) _UpperCAmelCase : Optional[Any] = conv_bias _UpperCAmelCase : Union[str, Any] = num_buckets _UpperCAmelCase : Optional[Any] = max_bucket_distance _UpperCAmelCase : List[str] = num_conv_pos_embeddings _UpperCAmelCase : Tuple = num_conv_pos_embedding_groups _UpperCAmelCase : str = len(self.conv_dim ) _UpperCAmelCase : Union[str, Any] = num_hidden_layers _UpperCAmelCase : List[str] = intermediate_size _UpperCAmelCase : Dict = hidden_act _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Optional[int] = hidden_dropout _UpperCAmelCase : Dict = attention_dropout _UpperCAmelCase : List[Any] = activation_dropout _UpperCAmelCase : Optional[Any] = feat_proj_dropout _UpperCAmelCase : Union[str, Any] = final_dropout _UpperCAmelCase : Optional[Any] = layerdrop _UpperCAmelCase : int = layer_norm_eps _UpperCAmelCase : List[Any] = initializer_range _UpperCAmelCase : Tuple = num_ctc_classes _UpperCAmelCase : List[str] = vocab_size _UpperCAmelCase : Tuple = do_stable_layer_norm _UpperCAmelCase : int = use_weighted_layer_sum _UpperCAmelCase : List[str] = classifier_proj_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' f' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' f' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' 
) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _UpperCAmelCase : str = apply_spec_augment _UpperCAmelCase : Union[str, Any] = mask_time_prob _UpperCAmelCase : Optional[Any] = mask_time_length _UpperCAmelCase : str = mask_time_min_masks _UpperCAmelCase : List[Any] = mask_feature_prob _UpperCAmelCase : int = mask_feature_length # parameters for pretraining with codevector quantized representations _UpperCAmelCase : Any = num_codevectors_per_group _UpperCAmelCase : Dict = num_codevector_groups _UpperCAmelCase : List[Any] = contrastive_logits_temperature _UpperCAmelCase : List[str] = num_negatives _UpperCAmelCase : Tuple = codevector_dim _UpperCAmelCase : str = proj_codevector_dim _UpperCAmelCase : Tuple = diversity_loss_weight # ctc loss _UpperCAmelCase : Tuple = ctc_loss_reduction _UpperCAmelCase : Dict = ctc_zero_infinity # adapter _UpperCAmelCase : Optional[Any] = add_adapter _UpperCAmelCase : Tuple = adapter_kernel_size _UpperCAmelCase : Tuple = adapter_stride _UpperCAmelCase : int = num_adapter_layers _UpperCAmelCase : List[Any] = output_hidden_size or hidden_size # SequenceClassification-specific parameter. Feel free to ignore for other classes. _UpperCAmelCase : Union[str, Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. _UpperCAmelCase : Union[str, Any] = list(A ) _UpperCAmelCase : Optional[Any] = list(A ) _UpperCAmelCase : Tuple = list(A ) _UpperCAmelCase : Optional[Any] = xvector_output_dim @property def __lowerCAmelCase ( self ) -> Optional[Any]: return functools.reduce(operator.mul , self.conv_stride , 1 )
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _lowerCAmelCase :Tuple = tuple[int, int] class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A ) -> None: _UpperCAmelCase : set[int] = vertices _UpperCAmelCase : dict[EdgeT, int] = { (min(A ), max(A )): weight for edge, weight in edges.items() } def __lowerCAmelCase ( self , A , A ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _UpperCAmelCase : List[Any] = weight def __lowerCAmelCase ( self ) -> Graph: _UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} ) _UpperCAmelCase : EdgeT _UpperCAmelCase : int _UpperCAmelCase : EdgeT _UpperCAmelCase : int while len(subgraph.vertices ) < len(self.vertices ): _UpperCAmelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _UpperCAmelCase : Tuple = edge _UpperCAmelCase : Optional[int] = weight subgraph.add_edge(A , A ) return subgraph def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ): _UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : dict[EdgeT, int] = {} _UpperCAmelCase : list[str] _UpperCAmelCase : int _UpperCAmelCase : int with open(UpperCamelCase__ ) as f: _UpperCAmelCase : str = f.read().strip().split('''\n''' ) _UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data] for edgea in range(1 , len(UpperCamelCase__ ) ): for edgea in range(UpperCamelCase__ ): if adjaceny_matrix[edgea][edgea] != "-": _UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] ) _UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ ) _UpperCAmelCase : Graph = graph.prims_algorithm() _UpperCAmelCase : int = sum(graph.edges.values() ) _UpperCAmelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=0.999 , UpperCamelCase__ : Any="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(UpperCamelCase__ : str ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(UpperCamelCase__ : Optional[int] ): return math.exp(t * -12.0 ) else: raise ValueError(F'Unsupported alpha_tranform_type: {alpha_transform_type}' ) _UpperCAmelCase : Dict = [] for i in range(UpperCamelCase__ ): _UpperCAmelCase : List[str] = i / num_diffusion_timesteps _UpperCAmelCase : Any = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(UpperCamelCase__ ) / alpha_bar_fn(UpperCamelCase__ ) , UpperCamelCase__ ) ) return torch.tensor(UpperCamelCase__ , dtype=torch.floataa ) class _UpperCAmelCase ( a ,a ): '''simple docstring''' a__ =[e.name for e in KarrasDiffusionSchedulers] a__ =2 @register_to_config def __init__( self , A = 1_0_0_0 , A = 0.00_085 , A = 0.012 , A = "linear" , A = None , A = "epsilon" , A = False , A = False , A = 1.0 , A = "linspace" , A = 0 , ) -> Optional[int]: if trained_betas is not None: _UpperCAmelCase : Optional[Any] = torch.tensor(A , dtype=torch.floataa ) elif beta_schedule == "linear": _UpperCAmelCase : Union[str, Any] = torch.linspace(A , A , A , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. _UpperCAmelCase : Optional[Any] = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , A , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule _UpperCAmelCase : str = betas_for_alpha_bar(A , alpha_transform_type='''cosine''' ) elif beta_schedule == "exp": _UpperCAmelCase : int = betas_for_alpha_bar(A , alpha_transform_type='''exp''' ) else: raise NotImplementedError(f'{beta_schedule} does is not implemented for {self.__class__}' ) _UpperCAmelCase : List[str] = 1.0 - self.betas _UpperCAmelCase : str = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(A , A , A ) _UpperCAmelCase : Any = use_karras_sigmas def __lowerCAmelCase ( self , A , A=None ) -> Optional[int]: if schedule_timesteps is None: _UpperCAmelCase : List[str] = self.timesteps _UpperCAmelCase : Dict = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: _UpperCAmelCase : Any = 1 if len(A ) > 1 else 0 else: _UpperCAmelCase : Tuple = timestep.cpu().item() if torch.is_tensor(A ) else timestep _UpperCAmelCase : Tuple = self._index_counter[timestep_int] return indices[pos].item() @property def __lowerCAmelCase ( self ) -> Any: # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __lowerCAmelCase ( self , A , A , ) -> torch.FloatTensor: _UpperCAmelCase : Tuple = self.index_for_timestep(A ) _UpperCAmelCase : Any = self.sigmas[step_index] _UpperCAmelCase : Tuple = sample / ((sigma**2 + 1) ** 0.5) return sample def __lowerCAmelCase ( self , A , A = None , A = None , ) -> Union[str, Any]: _UpperCAmelCase : int = num_inference_steps _UpperCAmelCase : List[Any] = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": _UpperCAmelCase : Optional[int] = np.linspace(0 , num_train_timesteps - 1 , A , dtype=A )[::-1].copy() elif self.config.timestep_spacing == "leading": _UpperCAmelCase : str = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _UpperCAmelCase : List[Any] = (np.arange(0 , A ) * step_ratio).round()[::-1].copy().astype(A ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": _UpperCAmelCase : Optional[int] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 _UpperCAmelCase : Tuple = (np.arange(A , 0 , -step_ratio )).round().copy().astype(A ) timesteps -= 1 else: raise ValueError( f'{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.' 
) _UpperCAmelCase : List[str] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) _UpperCAmelCase : Union[str, Any] = np.log(A ) _UpperCAmelCase : str = np.interp(A , np.arange(0 , len(A ) ) , A ) if self.config.use_karras_sigmas: _UpperCAmelCase : Any = self._convert_to_karras(in_sigmas=A , num_inference_steps=self.num_inference_steps ) _UpperCAmelCase : int = np.array([self._sigma_to_t(A , A ) for sigma in sigmas] ) _UpperCAmelCase : int = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) _UpperCAmelCase : Dict = torch.from_numpy(A ).to(device=A ) _UpperCAmelCase : Tuple = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] ) _UpperCAmelCase : Optional[int] = torch.from_numpy(A ) _UpperCAmelCase : List[Any] = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] ) if str(A ).startswith('''mps''' ): # mps does not support float64 _UpperCAmelCase : List[Any] = timesteps.to(A , dtype=torch.floataa ) else: _UpperCAmelCase : Tuple = timesteps.to(device=A ) # empty dt and derivative _UpperCAmelCase : Dict = None _UpperCAmelCase : Dict = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter _UpperCAmelCase : Optional[Any] = defaultdict(A ) def __lowerCAmelCase ( self , A , A ) -> Tuple: # get log sigma _UpperCAmelCase : Dict = np.log(A ) # get distribution _UpperCAmelCase : Union[str, Any] = log_sigma - log_sigmas[:, np.newaxis] # get sigmas range _UpperCAmelCase : str = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 ) _UpperCAmelCase : List[str] = low_idx + 1 _UpperCAmelCase : int = log_sigmas[low_idx] _UpperCAmelCase : Any = log_sigmas[high_idx] # interpolate sigmas _UpperCAmelCase : Optional[Any] = (low - log_sigma) / (low - high) _UpperCAmelCase : Optional[int] = np.clip(A , 0 , 1 ) # transform interpolation to time range _UpperCAmelCase : List[str] = (1 - w) * low_idx + w * high_idx _UpperCAmelCase : List[str] = t.reshape(sigma.shape ) return t def __lowerCAmelCase ( self , A , A ) -> torch.FloatTensor: _UpperCAmelCase : float = in_sigmas[-1].item() _UpperCAmelCase : float = in_sigmas[0].item() _UpperCAmelCase : List[Any] = 7.0 # 7.0 is the value used in the paper _UpperCAmelCase : Optional[Any] = np.linspace(0 , 1 , A ) _UpperCAmelCase : Any = sigma_min ** (1 / rho) _UpperCAmelCase : str = sigma_max ** (1 / rho) _UpperCAmelCase : Optional[int] = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho return sigmas @property def __lowerCAmelCase ( self ) -> Any: return self.dt is None def __lowerCAmelCase ( self , A , A , A , A = True , ) -> Union[SchedulerOutput, Tuple]: _UpperCAmelCase : Union[str, Any] = self.index_for_timestep(A ) # advance index counter by 1 _UpperCAmelCase : int = timestep.cpu().item() if torch.is_tensor(A ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: _UpperCAmelCase : List[Any] = self.sigmas[step_index] _UpperCAmelCase : List[str] = self.sigmas[step_index + 1] else: # 2nd order / Heun's method _UpperCAmelCase : Dict = self.sigmas[step_index - 1] _UpperCAmelCase : List[Any] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API _UpperCAmelCase : List[Any] = 0 _UpperCAmelCase : Union[str, Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": _UpperCAmelCase : Optional[Any] = sigma_hat if self.state_in_first_order else sigma_next _UpperCAmelCase : List[Any] = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": _UpperCAmelCase : Any = sigma_hat if self.state_in_first_order else sigma_next _UpperCAmelCase : Any = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": _UpperCAmelCase : int = model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`' ) if self.config.clip_sample: _UpperCAmelCase : int = pred_original_sample.clamp( -self.config.clip_sample_range , self.config.clip_sample_range ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order _UpperCAmelCase : List[str] = (sample - pred_original_sample) / sigma_hat # 3. delta timestep _UpperCAmelCase : Optional[int] = sigma_next - sigma_hat # store for 2nd order step _UpperCAmelCase : int = derivative _UpperCAmelCase : Optional[int] = dt _UpperCAmelCase : int = sample else: # 2. 2nd order / Heun's method _UpperCAmelCase : List[Any] = (sample - pred_original_sample) / sigma_next _UpperCAmelCase : str = (self.prev_derivative + derivative) / 2 # 3. take prev timestep & sample _UpperCAmelCase : Dict = self.dt _UpperCAmelCase : Tuple = self.sample # free dt and derivative # Note, this puts the scheduler in "first order mode" _UpperCAmelCase : Optional[int] = None _UpperCAmelCase : int = None _UpperCAmelCase : int = None _UpperCAmelCase : int = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=A ) def __lowerCAmelCase ( self , A , A , A , ) -> torch.FloatTensor: # Make sure sigmas and timesteps have the same device and dtype as original_samples _UpperCAmelCase : Dict = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(A ): # mps does not support float64 _UpperCAmelCase : Tuple = self.timesteps.to(original_samples.device , dtype=torch.floataa ) _UpperCAmelCase : List[Any] = timesteps.to(original_samples.device , dtype=torch.floataa ) else: _UpperCAmelCase : Union[str, Any] = self.timesteps.to(original_samples.device ) _UpperCAmelCase : Dict = timesteps.to(original_samples.device ) _UpperCAmelCase : Optional[Any] = [self.index_for_timestep(A , A ) for t in timesteps] _UpperCAmelCase : str = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): _UpperCAmelCase : Optional[int] = sigma.unsqueeze(-1 ) _UpperCAmelCase : Any = original_samples + noise * sigma return noisy_samples def __len__( self ) -> Any: return self.config.num_train_timesteps
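# Heun's method in isolation, on a toy ODE dy/dt = -y, to show the predictor /
# corrector structure the scheduler above spreads across two `step` calls
# (purely illustrative; none of these names come from the scheduler):
def heun_step(f, t, y, dt):
    d1 = f(t, y)                    # 1st-order (Euler) slope, the "prev_derivative"
    y_euler = y + d1 * dt           # predictor
    d2 = f(t + dt, y_euler)         # slope at the predicted point
    return y + (d1 + d2) / 2 * dt   # corrector: average the two slopes


y, t, dt = 1.0, 0.0, 0.1
for _ in range(10):
    y = heun_step(lambda t, y: -y, t, y, dt)
    t += dt
print(round(y, 6))  # ~0.368541, close to exp(-1) ~ 0.367879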
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :int = logging.get_logger(__name__) _lowerCAmelCase :Union[str, Any] = { 'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''mgp-str''' def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]: super().__init__(**A ) _UpperCAmelCase : Any = image_size _UpperCAmelCase : str = patch_size _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Dict = max_token_length _UpperCAmelCase : Optional[Any] = num_character_labels _UpperCAmelCase : int = num_bpe_labels _UpperCAmelCase : List[str] = num_wordpiece_labels _UpperCAmelCase : Optional[int] = hidden_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : List[Any] = mlp_ratio _UpperCAmelCase : List[str] = distilled _UpperCAmelCase : Optional[int] = layer_norm_eps _UpperCAmelCase : str = drop_rate _UpperCAmelCase : List[Any] = qkv_bias _UpperCAmelCase : List[str] = attn_drop_rate _UpperCAmelCase : Dict = drop_path_rate _UpperCAmelCase : Union[str, Any] = output_aa_attentions _UpperCAmelCase : List[str] = initializer_range
"""simple docstring""" from dataclasses import asdict, dataclass from typing import Optional from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :List[Any] = logging.get_logger(__name__) # TODO Update this _lowerCAmelCase :List[str] = { 'facebook/esm-1b': 'https://huggingface.co/facebook/esm-1b/resolve/main/config.json', # See all ESM models at https://huggingface.co/models?filter=esm } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''esm''' def __init__( self , A=None , A=None , A=None , A=7_6_8 , A=1_2 , A=1_2 , A=3_0_7_2 , A=0.1 , A=0.1 , A=1_0_2_6 , A=0.02 , A=1E-12 , A="absolute" , A=True , A=None , A=False , A=False , A=None , A=None , **A , ) -> List[Any]: super().__init__(pad_token_id=A , mask_token_id=A , **A ) _UpperCAmelCase : Union[str, Any] = vocab_size _UpperCAmelCase : Dict = hidden_size _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : List[str] = num_attention_heads _UpperCAmelCase : Optional[Any] = intermediate_size _UpperCAmelCase : Optional[Any] = hidden_dropout_prob _UpperCAmelCase : str = attention_probs_dropout_prob _UpperCAmelCase : int = max_position_embeddings _UpperCAmelCase : Optional[Any] = initializer_range _UpperCAmelCase : Tuple = layer_norm_eps _UpperCAmelCase : List[str] = position_embedding_type _UpperCAmelCase : List[Any] = use_cache _UpperCAmelCase : Optional[Any] = emb_layer_norm_before _UpperCAmelCase : Optional[int] = token_dropout _UpperCAmelCase : List[Any] = is_folding_model if is_folding_model: if esmfold_config is None: logger.info('''No esmfold_config supplied for folding model, using default values.''' ) _UpperCAmelCase : int = EsmFoldConfig() elif isinstance(A , A ): _UpperCAmelCase : Optional[int] = EsmFoldConfig(**A ) _UpperCAmelCase : Tuple = esmfold_config if vocab_list is None: logger.warning('''No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!''' ) _UpperCAmelCase : Tuple = get_default_vocab_list() else: _UpperCAmelCase : List[str] = vocab_list else: _UpperCAmelCase : List[str] = None _UpperCAmelCase : int = None if self.esmfold_config is not None and getattr(self.esmfold_config , '''use_esm_attn_map''' , A ): raise ValueError('''The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!''' ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : str = super().to_dict() if isinstance(self.esmfold_config , A ): _UpperCAmelCase : List[str] = self.esmfold_config.to_dict() return output @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =None a__ =True a__ =False a__ =False a__ =False a__ =0 a__ =True a__ =False a__ =1_2_8 a__ =None def __lowerCAmelCase ( self ) -> str: if self.trunk is None: _UpperCAmelCase : List[str] = TrunkConfig() elif isinstance(self.trunk , A ): _UpperCAmelCase : List[Any] = TrunkConfig(**self.trunk ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : List[Any] = asdict(self ) _UpperCAmelCase : Optional[Any] = self.trunk.to_dict() return output @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =4_8 a__ =1_0_2_4 a__ =1_2_8 a__ =3_2 a__ =3_2 a__ =3_2 a__ =0 a__ =0 a__ =False a__ =4 a__ =1_2_8 a__ =None def __lowerCAmelCase ( self ) -> List[str]: if self.structure_module is None: _UpperCAmelCase : int = StructureModuleConfig() elif isinstance(self.structure_module , A ): _UpperCAmelCase : Optional[int] = StructureModuleConfig(**self.structure_module ) if self.max_recycles <= 0: raise ValueError(f'`max_recycles` should be positive, got {self.max_recycles}.' 
) if self.sequence_state_dim % self.sequence_state_dim != 0: raise ValueError( '''`sequence_state_dim` should be a round multiple of `sequence_state_dim`, got''' f' {self.sequence_state_dim} and {self.sequence_state_dim}.' ) if self.pairwise_state_dim % self.pairwise_state_dim != 0: raise ValueError( '''`pairwise_state_dim` should be a round multiple of `pairwise_state_dim`, got''' f' {self.pairwise_state_dim} and {self.pairwise_state_dim}.' ) _UpperCAmelCase : Any = self.sequence_state_dim // self.sequence_head_width _UpperCAmelCase : Dict = self.pairwise_state_dim // self.pairwise_head_width if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width: raise ValueError( '''`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got''' f' {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}.' ) if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width: raise ValueError( '''`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got''' f' {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}.' ) if self.pairwise_state_dim % 2 != 0: raise ValueError(f'`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.' ) if self.dropout >= 0.4: raise ValueError(f'`dropout` should not be greater than 0.4, got {self.dropout}.' ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : Optional[Any] = asdict(self ) _UpperCAmelCase : Optional[Any] = self.structure_module.to_dict() return output @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =3_8_4 a__ =1_2_8 a__ =1_6 a__ =1_2_8 a__ =1_2 a__ =4 a__ =8 a__ =0.1 a__ =8 a__ =1 a__ =2 a__ =7 a__ =1_0 a__ =1e-8 a__ =1e5 def __lowerCAmelCase ( self ) -> str: return asdict(self ) def lowerCamelCase_ (): return ( "<cls>", "<pad>", "<eos>", "<unk>", "L", "A", "G", "V", "S", "E", "R", "T", "I", "D", "P", "K", "Q", "N", "F", "Y", "M", "H", "W", "C", "X", "B", "U", "Z", "O", ".", "-", "<null_1>", "<mask>", )
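# What the head-width checks above enforce: state_dim must factor exactly into
# num_heads * head_width. A tiny illustration with made-up numbers:
sequence_state_dim, sequence_head_width = 1024, 32
sequence_num_heads = sequence_state_dim // sequence_head_width
assert sequence_state_dim == sequence_num_heads * sequence_head_width  # 32 heads of width 32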
"""simple docstring""" from __future__ import annotations import math def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ): if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(UpperCamelCase__ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) def lowerCamelCase_ (): _UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423] _UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : List[str] = [ '''encoder.version''', '''decoder.version''', '''model.encoder.version''', '''model.decoder.version''', '''decoder.output_projection.weight''', '''_float_tensor''', '''encoder.embed_positions._float_tensor''', '''decoder.embed_positions._float_tensor''', ] for k in ignore_keys: state_dict.pop(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : int ): _UpperCAmelCase : Optional[int] = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: _UpperCAmelCase : Any = s_dict.pop(UpperCamelCase__ ) elif "subsample" in key: _UpperCAmelCase : str = s_dict.pop(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = emb.weight.shape _UpperCAmelCase : Optional[int] = nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = emb.weight.data return lin_layer def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ): _UpperCAmelCase : Dict = torch.load(UpperCamelCase__ , map_location='''cpu''' ) _UpperCAmelCase : Optional[int] = mam_aaa['''args'''] _UpperCAmelCase : Dict = mam_aaa['''model'''] _UpperCAmelCase : Any = state_dict['''decoder.output_projection.weight'''] remove_ignore_keys_(UpperCamelCase__ ) rename_keys(UpperCamelCase__ ) _UpperCAmelCase : str = state_dict['''decoder.embed_tokens.weight'''].shape[0] _UpperCAmelCase : Optional[int] = args.share_decoder_input_output_embed _UpperCAmelCase : Any = [int(UpperCamelCase__ ) for i in args.conv_kernel_sizes.split(''',''' )] _UpperCAmelCase : Dict = SpeechaTextConfig( vocab_size=UpperCamelCase__ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , num_conv_layers=len(UpperCamelCase__ ) , conv_channels=args.conv_channels , conv_kernel_sizes=UpperCamelCase__ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=UpperCamelCase__ , num_beams=5 , max_length=200 , use_cache=UpperCamelCase__ , decoder_start_token_id=2 , early_stopping=UpperCamelCase__ , ) _UpperCAmelCase : Tuple = SpeechaTextForConditionalGeneration(UpperCamelCase__ ) _UpperCAmelCase , _UpperCAmelCase : Optional[Any] = model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0 and not set(UpperCamelCase__ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( '''Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,''' F' but all the following weights are missing {missing}' ) if tie_embeds: _UpperCAmelCase : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: _UpperCAmelCase : Any = lm_head_weights model.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": 
_lowerCAmelCase :Dict = argparse.ArgumentParser() # Required parameters parser.add_argument('--fairseq_path', type=str, help='Path to the fairseq model (.pt) file.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _lowerCAmelCase :Optional[Any] = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
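# Example invocation of the converter above (the script filename and both paths
# are illustrative assumptions; use whatever name this module is saved under):
#
#   python convert_s2t_fairseq_to_tfms.py \
#       --fairseq_path /path/to/s2t_checkpoint.pt \
#       --pytorch_dump_folder_path ./s2t-converted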
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _lowerCAmelCase :Optional[Any] = False class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A ) _UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = generator.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = '''cyberpunk 2077''' _UpperCAmelCase : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.dual_guided( prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images _UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger ''' _UpperCAmelCase : Tuple = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.text_to_image( prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images _UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images _UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 
2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
"""simple docstring""" import time from dataclasses import dataclass from multiprocessing import Pool from unittest import TestCase from unittest.mock import patch import multiprocess import numpy as np import pytest from datasets.utils.py_utils import ( NestedDataStructure, asdict, iflatmap_unordered, map_nested, temp_seed, temporary_assignment, zip_dict, ) from .utils import require_tf, require_torch def lowerCamelCase_ (UpperCamelCase__ : List[str] ): # picklable for multiprocessing return x.sum() def lowerCamelCase_ (UpperCamelCase__ : str ): # picklable for multiprocessing return i + 1 @dataclass class _UpperCAmelCase : '''simple docstring''' a__ =42 a__ =42 class _UpperCAmelCase ( a ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : str = {} _UpperCAmelCase : int = [] _UpperCAmelCase : Dict = 1 _UpperCAmelCase : Tuple = [1, 2] _UpperCAmelCase : Dict = {'''a''': 1, '''b''': 2} _UpperCAmelCase : Optional[int] = {'''a''': [1, 2], '''b''': [3, 4]} _UpperCAmelCase : str = {'''a''': {'''1''': 1}, '''b''': 2} _UpperCAmelCase : Optional[int] = {'''a''': 1, '''b''': 2, '''c''': 3, '''d''': 4} _UpperCAmelCase : List[Any] = {} _UpperCAmelCase : Union[str, Any] = [] _UpperCAmelCase : Union[str, Any] = 2 _UpperCAmelCase : int = [2, 3] _UpperCAmelCase : str = {'''a''': 2, '''b''': 3} _UpperCAmelCase : Optional[Any] = {'''a''': [2, 3], '''b''': [4, 5]} _UpperCAmelCase : Union[str, Any] = {'''a''': {'''1''': 2}, '''b''': 3} _UpperCAmelCase : Dict = {'''a''': 2, '''b''': 3, '''c''': 4, '''d''': 5} self.assertEqual(map_nested(A , A ) , A ) self.assertEqual(map_nested(A , A ) , A ) self.assertEqual(map_nested(A , A ) , A ) self.assertEqual(map_nested(A , A ) , A ) self.assertEqual(map_nested(A , A ) , A ) self.assertEqual(map_nested(A , A ) , A ) self.assertEqual(map_nested(A , A ) , A ) self.assertEqual(map_nested(A , A ) , A ) _UpperCAmelCase : List[Any] = 2 self.assertEqual(map_nested(A , A , num_proc=A ) , A ) self.assertEqual(map_nested(A , A , num_proc=A ) , A ) self.assertEqual(map_nested(A , A , num_proc=A ) , A ) self.assertEqual(map_nested(A , A , num_proc=A ) , A ) self.assertEqual(map_nested(A , A , num_proc=A ) , A ) self.assertEqual(map_nested(A , A , num_proc=A ) , A ) self.assertEqual(map_nested(A , A , num_proc=A ) , A ) self.assertEqual(map_nested(A , A , num_proc=A ) , A ) _UpperCAmelCase : Tuple = {'''a''': np.eye(2 ), '''b''': np.zeros(3 ), '''c''': np.ones(2 )} _UpperCAmelCase : Dict = {'''a''': 2, '''b''': 0, '''c''': 2} _UpperCAmelCase : str = { '''a''': np.eye(2 ).astype(A ), '''b''': np.zeros(3 ).astype(A ), '''c''': np.ones(2 ).astype(A ), } self.assertEqual(map_nested(A , A , map_numpy=A ) , A ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A , A , map_numpy=A ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) self.assertEqual(map_nested(A , A , map_numpy=A , num_proc=A ) , A ) self.assertEqual( {k: v.tolist() for k, v in map_nested(A , A , map_numpy=A , num_proc=A ).items()} , {k: v.tolist() for k, v in expected_map_nested_sna_int.items()} , ) with self.assertRaises(A ): # can't pickle a local lambda map_nested(lambda A : x + 1 , A , num_proc=A ) def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : Optional[Any] = {'''a''': 1, '''b''': 2} _UpperCAmelCase : Optional[Any] = {'''a''': 3, '''b''': 4} _UpperCAmelCase : Any = {'''a''': 5, '''b''': 6} _UpperCAmelCase : Dict = sorted([('''a''', (1, 3, 5)), ('''b''', (2, 4, 6))] ) self.assertEqual(sorted(zip_dict(A , A , A ) ) , A ) def 
__lowerCAmelCase ( self ) -> Union[str, Any]: class _UpperCAmelCase : '''simple docstring''' a__ ='''bar''' _UpperCAmelCase : Tuple = Foo() self.assertEqual(foo.my_attr , '''bar''' ) with temporary_assignment(A , '''my_attr''' , '''BAR''' ): self.assertEqual(foo.my_attr , '''BAR''' ) self.assertEqual(foo.my_attr , '''bar''' ) @pytest.mark.parametrize( '''iterable_length, num_proc, expected_num_proc''' , [ (1, None, 1), (1, 1, 1), (2, None, 1), (2, 1, 1), (2, 2, 1), (2, 3, 1), (3, 2, 1), (16, 16, 16), (16, 17, 16), (17, 16, 16), ] , ) def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : Optional[int] ): with patch('''datasets.utils.py_utils._single_map_nested''' ) as mock_single_map_nested, patch( '''datasets.parallel.parallel.Pool''' ) as mock_multiprocessing_pool: _UpperCAmelCase : List[Any] = {F'{i}': i for i in range(UpperCamelCase__ )} _UpperCAmelCase : Optional[Any] = map_nested(lambda UpperCamelCase__ : x + 10 , UpperCamelCase__ , num_proc=UpperCamelCase__ , parallel_min_length=16 ) if expected_num_proc == 1: assert mock_single_map_nested.called assert not mock_multiprocessing_pool.called else: assert not mock_single_map_nested.called assert mock_multiprocessing_pool.called assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc class _UpperCAmelCase ( a ): '''simple docstring''' @require_tf def __lowerCAmelCase ( self ) -> Union[str, Any]: import tensorflow as tf from tensorflow.keras import layers _UpperCAmelCase : Optional[int] = layers.Dense(2 ) def gen_random_output(): _UpperCAmelCase : Tuple = tf.random.uniform((1, 3) ) return model(A ).numpy() with temp_seed(4_2 , set_tensorflow=A ): _UpperCAmelCase : List[str] = gen_random_output() with temp_seed(4_2 , set_tensorflow=A ): _UpperCAmelCase : Optional[Any] = gen_random_output() _UpperCAmelCase : str = gen_random_output() np.testing.assert_equal(A , A ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @require_torch def __lowerCAmelCase ( self ) -> List[str]: import torch def gen_random_output(): _UpperCAmelCase : Tuple = torch.nn.Linear(3 , 2 ) _UpperCAmelCase : Tuple = torch.rand(1 , 3 ) return model(A ).detach().numpy() with temp_seed(4_2 , set_pytorch=A ): _UpperCAmelCase : Optional[Any] = gen_random_output() with temp_seed(4_2 , set_pytorch=A ): _UpperCAmelCase : Optional[Any] = gen_random_output() _UpperCAmelCase : str = gen_random_output() np.testing.assert_equal(A , A ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) def __lowerCAmelCase ( self ) -> Any: def gen_random_output(): return np.random.rand(1 , 3 ) with temp_seed(4_2 ): _UpperCAmelCase : List[str] = gen_random_output() with temp_seed(4_2 ): _UpperCAmelCase : int = gen_random_output() _UpperCAmelCase : Dict = gen_random_output() np.testing.assert_equal(A , A ) self.assertGreater(np.abs(outa - outa ).sum() , 0 ) @pytest.mark.parametrize('''input_data''' , [{}] ) def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : Optional[int] = NestedDataStructure(UpperCamelCase__ ).data assert output_data == input_data @pytest.mark.parametrize( '''data, expected_output''' , [ ({}, []), ([], []), ('''foo''', ['''foo''']), (['''foo''', '''bar'''], ['''foo''', '''bar''']), ([['''foo''', '''bar''']], ['''foo''', '''bar''']), ([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']), ([[['''foo'''], '''bar''']], ['''foo''', '''bar''']), ({'''a''': 1, '''b''': 2}, [1, 2]), ({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[1, 2]], '''b''': [3, 4]}, 
[1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]), ({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]), ({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]), ({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]), ] , ) def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple ): _UpperCAmelCase : str = NestedDataStructure(UpperCamelCase__ ).flatten() assert output == expected_output def lowerCamelCase_ (): _UpperCAmelCase : Dict = A(x=1 , y='''foobar''' ) _UpperCAmelCase : str = {'''x''': 1, '''y''': '''foobar'''} assert asdict(UpperCamelCase__ ) == expected_output _UpperCAmelCase : int = {'''a''': {'''b''': A(x=10 , y='''foo''' )}, '''c''': [A(x=20 , y='''bar''' )]} _UpperCAmelCase : Tuple = {'''a''': {'''b''': {'''x''': 10, '''y''': '''foo'''}}, '''c''': [{'''x''': 20, '''y''': '''bar'''}]} assert asdict(UpperCamelCase__ ) == expected_output with pytest.raises(UpperCamelCase__ ): asdict([1, A(x=10 , y='''foo''' )] ) def lowerCamelCase_ (UpperCamelCase__ : str ): return text.split() def lowerCamelCase_ (UpperCamelCase__ : Tuple ): yield (time.time(), content) time.sleep(2 ) yield (time.time(), content) def lowerCamelCase_ (): with Pool(2 ) as pool: _UpperCAmelCase : Tuple = list(iflatmap_unordered(UpperCamelCase__ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(UpperCamelCase__ ) == 20 # check multiprocess from pathos (uses dill for pickling) with multiprocess.Pool(2 ) as pool: _UpperCAmelCase : str = list(iflatmap_unordered(UpperCamelCase__ , _split_text , kwargs_iterable=[{'''text''': '''hello there'''}] * 10 ) ) assert out.count('''hello''' ) == 10 assert out.count('''there''' ) == 10 assert len(UpperCamelCase__ ) == 20 # check that we get items as fast as possible with Pool(2 ) as pool: _UpperCAmelCase : Dict = [] for yield_time, content in iflatmap_unordered( UpperCamelCase__ , _aseconds_generator_of_aitems_with_timing , kwargs_iterable=[{'''content''': '''a'''}, {'''content''': '''b'''}] ): assert yield_time < time.time() + 0.1, "we should each item directly after it was yielded" out.append(UpperCamelCase__ ) assert out.count('''a''' ) == 2 assert out.count('''b''' ) == 2 assert len(UpperCamelCase__ ) == 4
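# What map_nested does in one call: apply a function to every leaf of a nested
# container, keeping the structure. A quick standalone check (same import the
# tests above use):
from datasets.utils.py_utils import map_nested

print(map_nested(lambda x: x + 1, {"a": [1, 2], "b": {"c": 3}}))
# {'a': [2, 3], 'b': {'c': 4}}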
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCAmelCase :Any = False @skip_mps class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ): '''simple docstring''' a__ =StableDiffusionAttendAndExcitePipeline a__ =False a__ =TEXT_TO_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) a__ =TEXT_TO_IMAGE_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __lowerCAmelCase ( cls ) -> List[str]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , ) _UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , ) torch.manual_seed(0 ) _UpperCAmelCase : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _UpperCAmelCase : List[str] = CLIPTextModel(A ) _UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCAmelCase : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: _UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : List[str] = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = '''cpu''' _UpperCAmelCase : Tuple = self.get_dummy_components() _UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Dict = self.get_dummy_inputs(A 
) _UpperCAmelCase : Union[str, Any] = pipe(**A ).images _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) _UpperCAmelCase : int = np.array( [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) _UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1E-3 ) def __lowerCAmelCase ( self ) -> Dict: super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> str: super().test_save_load_local(expected_max_difference=5E-4 ) def __lowerCAmelCase ( self ) -> Optional[int]: super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = torch.manual_seed(5_1 ) _UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses''' _UpperCAmelCase : int = [5, 7] _UpperCAmelCase : Dict = pipe( prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] _UpperCAmelCase : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
"""simple docstring""" import os import pytest import yaml from datasets.features.features import Features, Value from datasets.info import DatasetInfo, DatasetInfosDict @pytest.mark.parametrize( '''files''' , [ ['''full:README.md''', '''dataset_infos.json'''], ['''empty:README.md''', '''dataset_infos.json'''], ['''dataset_infos.json'''], ['''full:README.md'''], ] , ) def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ): _UpperCAmelCase : List[Any] = tmp_path_factory.mktemp('''dset_infos_dir''' ) if "full:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''---\ndataset_info:\n dataset_size: 42\n---''' ) if "empty:README.md" in files: with open(dataset_infos_dir / '''README.md''' , '''w''' ) as f: f.write('''''' ) # we want to support dataset_infos.json for backward compatibility if "dataset_infos.json" in files: with open(dataset_infos_dir / '''dataset_infos.json''' , '''w''' ) as f: f.write('''{"default": {"dataset_size": 42}}''' ) _UpperCAmelCase : Any = DatasetInfosDict.from_directory(UpperCamelCase__ ) assert dataset_infos assert dataset_infos["default"].dataset_size == 42 @pytest.mark.parametrize( '''dataset_info''' , [ DatasetInfo(), DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ), ] , ) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : DatasetInfo ): _UpperCAmelCase : List[Any] = str(UpperCamelCase__ ) dataset_info.write_to_directory(UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = DatasetInfo.from_directory(UpperCamelCase__ ) assert dataset_info == reloaded assert os.path.exists(os.path.join(UpperCamelCase__ , '''dataset_info.json''' ) ) def lowerCamelCase_ (): _UpperCAmelCase : Optional[int] = DatasetInfo( description='''foo''' , citation='''bar''' , homepage='''https://foo.bar''' , license='''CC0''' , features=Features({'''a''': Value('''int32''' )} ) , post_processed={} , supervised_keys=() , task_templates=[] , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train''', '''num_examples''': 42}] , download_checksums={} , download_size=1337 , post_processing_size=442 , dataset_size=1234 , size_in_bytes=1337 + 442 + 1234 , ) _UpperCAmelCase : Union[str, Any] = dataset_info._to_yaml_dict() assert sorted(UpperCamelCase__ ) == sorted(DatasetInfo._INCLUDED_INFO_IN_YAML ) for key in DatasetInfo._INCLUDED_INFO_IN_YAML: assert key in dataset_info_yaml_dict assert isinstance(dataset_info_yaml_dict[key] , (list, dict, int, str) ) _UpperCAmelCase : Any = yaml.safe_dump(UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = yaml.safe_load(UpperCamelCase__ ) assert dataset_info_yaml_dict == reloaded def lowerCamelCase_ (): _UpperCAmelCase : Any = DatasetInfo() _UpperCAmelCase : Optional[int] = dataset_info._to_yaml_dict() assert dataset_info_yaml_dict == {} @pytest.mark.parametrize( '''dataset_infos_dict''' , [ DatasetInfosDict(), DatasetInfosDict({'''default''': DatasetInfo()} ), DatasetInfosDict({'''my_config_name''': DatasetInfo()} ), DatasetInfosDict( { '''default''': DatasetInfo( description='''foo''' , features=Features({'''a''': Value('''int32''' )} ) , builder_name='''builder''' , config_name='''config''' , version='''1.0.0''' , splits=[{'''name''': '''train'''}] , download_size=42 , ) } ), DatasetInfosDict( { '''v1''': DatasetInfo(dataset_size=42 ), '''v2''': 
DatasetInfo(dataset_size=1337 ), } ), ] , ) def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : DatasetInfosDict ): _UpperCAmelCase : Union[str, Any] = str(UpperCamelCase__ ) dataset_infos_dict.write_to_directory(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = DatasetInfosDict.from_directory(UpperCamelCase__ ) # the config_name of the dataset_infos_dict take over the attribute for config_name, dataset_info in dataset_infos_dict.items(): _UpperCAmelCase : Any = config_name # the yaml representation doesn't include fields like description or citation # so we just test that we can recover what we can from the yaml _UpperCAmelCase : str = DatasetInfo._from_yaml_dict(dataset_info._to_yaml_dict() ) assert dataset_infos_dict == reloaded if dataset_infos_dict: assert os.path.exists(os.path.join(UpperCamelCase__ , '''README.md''' ) )
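# A standalone sketch of the YAML round trip the tests above exercise. Note that
# _to_yaml_dict / _from_yaml_dict are private helpers of DatasetInfo (used by the
# tests themselves), so this relies on internal API and may change between releases:
import yaml
from datasets.info import DatasetInfo

info = DatasetInfo(description="demo", dataset_size=42)
restored = DatasetInfo._from_yaml_dict(yaml.safe_load(yaml.safe_dump(info._to_yaml_dict())))
assert restored.dataset_size == 42       # survives the YAML form
assert restored.description != "demo"    # description is not serialized to YAML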
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : List[str] = -1 _UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCAmelCase : str = TextStreamer(A ) model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCAmelCase : List[str] = cs.out[:-1] self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : List[Any] = -1 _UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] ) _UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A ) _UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A ) thread.start() _UpperCAmelCase : Any = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : Any = -1 _UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :] _UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A ) model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCAmelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Optional[int]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' ) _UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A ) _UpperCAmelCase : Tuple = -1 _UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A ) model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n" _UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : Dict = -1 _UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 ) _UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(A ): _UpperCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
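# The canonical streaming pattern the iterator tests above exercise, condensed
# into a standalone sketch (same tiny test checkpoint, so it stays cheap to run):
from threading import Thread

from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok)
# generate() runs in a worker thread; the streamer yields text in the caller
Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 5, "streamer": streamer}).start()
print("".join(streamer))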
"""simple docstring""" from __future__ import annotations _lowerCAmelCase :str = [] def lowerCamelCase_ (UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int , UpperCamelCase__ : int ): for i in range(len(UpperCamelCase__ ) ): if board[row][i] == 1: return False for i in range(len(UpperCamelCase__ ) ): if board[i][column] == 1: return False for i, j in zip(range(UpperCamelCase__ , -1 , -1 ) , range(UpperCamelCase__ , -1 , -1 ) ): if board[i][j] == 1: return False for i, j in zip(range(UpperCamelCase__ , -1 , -1 ) , range(UpperCamelCase__ , len(UpperCamelCase__ ) ) ): if board[i][j] == 1: return False return True def lowerCamelCase_ (UpperCamelCase__ : list[list[int]] , UpperCamelCase__ : int ): if row >= len(UpperCamelCase__ ): solution.append(UpperCamelCase__ ) printboard(UpperCamelCase__ ) print() return True for i in range(len(UpperCamelCase__ ) ): if is_safe(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ): _UpperCAmelCase : Dict = 1 solve(UpperCamelCase__ , row + 1 ) _UpperCAmelCase : Dict = 0 return False def lowerCamelCase_ (UpperCamelCase__ : list[list[int]] ): for i in range(len(UpperCamelCase__ ) ): for j in range(len(UpperCamelCase__ ) ): if board[i][j] == 1: print('''Q''' , end=''' ''' ) else: print('''.''' , end=''' ''' ) print() # n=int(input("The no. of queens")) _lowerCAmelCase :Dict = 8 _lowerCAmelCase :Optional[Any] = [[0 for i in range(n)] for j in range(n)] solve(board, 0) print('The total no. of solutions are :', len(solution))
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCamelCase_ (UpperCamelCase__ : float ): if num <= 0: raise ValueError('''math domain error''' ) return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0] def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ): return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Union[str, Any] = '''ZinengTang/tvlt-base''' _UpperCAmelCase : Dict = tempfile.mkdtemp() def __lowerCAmelCase ( self , **A ) -> int: return TvltImageProcessor.from_pretrained(self.checkpoint , **A ) def __lowerCAmelCase ( self , **A ) -> Optional[Any]: return TvltFeatureExtractor.from_pretrained(self.checkpoint , **A ) def __lowerCAmelCase ( self ) -> Optional[Any]: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : str = self.get_image_processor() _UpperCAmelCase : Optional[Any] = self.get_feature_extractor() _UpperCAmelCase : Tuple = TvltProcessor(image_processor=A , feature_extractor=A ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : int = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , A ) self.assertIsInstance(processor.image_processor , A ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : List[str] = self.get_image_processor() _UpperCAmelCase : int = self.get_feature_extractor() _UpperCAmelCase : List[str] = TvltProcessor(image_processor=A , feature_extractor=A ) _UpperCAmelCase : Tuple = np.ones([1_2_0_0_0] ) _UpperCAmelCase : Tuple = feature_extractor(A , return_tensors='''np''' ) _UpperCAmelCase : List[str] = processor(audio=A , return_tensors='''np''' ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : Union[str, Any] = self.get_image_processor() _UpperCAmelCase : Dict = self.get_feature_extractor() _UpperCAmelCase : Dict = TvltProcessor(image_processor=A , feature_extractor=A ) _UpperCAmelCase : Optional[int] = np.ones([3, 2_2_4, 2_2_4] ) _UpperCAmelCase : Tuple = image_processor(A , return_tensors='''np''' ) _UpperCAmelCase : List[Any] = processor(images=A , return_tensors='''np''' ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Optional[int] = self.get_image_processor() _UpperCAmelCase : Union[str, Any] = self.get_feature_extractor() _UpperCAmelCase : Optional[int] = TvltProcessor(image_processor=A , feature_extractor=A ) _UpperCAmelCase : List[Any] = np.ones([1_2_0_0_0] ) _UpperCAmelCase : Tuple = np.ones([3, 2_2_4, 2_2_4] ) _UpperCAmelCase : int = processor(audio=A , images=A ) self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] ) # test if it raises when no input is passed with pytest.raises(A ): processor() def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : Any = self.get_image_processor() _UpperCAmelCase : int = self.get_feature_extractor() _UpperCAmelCase : List[str] = TvltProcessor(image_processor=A , feature_extractor=A ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and 
`image_processor`+`feature_extractor` model input names do not match''' , )
263
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b" _UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:] _UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
263
1
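A short usage sketch of the binary_or helper above; the expected strings are ordinary bitwise ORs written out in binary:

print(binary_or(25, 32))  # 0b111001, since 25 | 32 == 57
print(binary_or(37, 50))  # 0b110111, since 37 | 50 == 55
assert int(binary_or(25, 32), 2) == 25 | 32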
"""simple docstring""" from ...processing_utils import ProcessorMixin class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''WhisperFeatureExtractor''' a__ ='''WhisperTokenizer''' def __init__( self , A , A ) -> Optional[Any]: super().__init__(A , A ) _UpperCAmelCase : Optional[Any] = self.feature_extractor _UpperCAmelCase : List[Any] = False def __lowerCAmelCase ( self , A=None , A=None , A=True ) -> Optional[int]: return self.tokenizer.get_decoder_prompt_ids(task=A , language=A , no_timestamps=A ) def __call__( self , *A , **A ) -> Union[str, Any]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*A , **A ) _UpperCAmelCase : Tuple = kwargs.pop('''audio''' , A ) _UpperCAmelCase : str = kwargs.pop('''sampling_rate''' , A ) _UpperCAmelCase : Any = kwargs.pop('''text''' , A ) if len(A ) > 0: _UpperCAmelCase : List[str] = args[0] _UpperCAmelCase : List[str] = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: _UpperCAmelCase : Dict = self.feature_extractor(A , *A , sampling_rate=A , **A ) if text is not None: _UpperCAmelCase : Tuple = self.tokenizer(A , **A ) if text is None: return inputs elif audio is None: return encodings else: _UpperCAmelCase : Tuple = encodings['''input_ids'''] return inputs def __lowerCAmelCase ( self , *A , **A ) -> Any: return self.tokenizer.batch_decode(*A , **A ) def __lowerCAmelCase ( self , *A , **A ) -> Dict: return self.tokenizer.decode(*A , **A ) def __lowerCAmelCase ( self , A , A="np" ) -> Optional[int]: return self.tokenizer.get_prompt_ids(A , return_tensors=A )
263
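A minimal usage sketch of the processor above; the checkpoint name "openai/whisper-tiny" and the 16 kHz sampling rate are assumptions for illustration, not taken from this file:

import numpy as np

from transformers import WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")  # assumed checkpoint

audio = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
inputs = processor(audio=audio, sampling_rate=16_000, return_tensors="np")
print(inputs["input_features"].shape)  # log-mel features from the feature extractor

labels = processor(text="hello world")["input_ids"]  # routed to the tokenizer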
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
263
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCAmelCase :Any = {'configuration_deit': ['DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'DeiTConfig', 'DeiTOnnxConfig']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Tuple = ['DeiTFeatureExtractor'] _lowerCAmelCase :Union[str, Any] = ['DeiTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Dict = [ 'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'DeiTForImageClassification', 'DeiTForImageClassificationWithTeacher', 'DeiTForMaskedImageModeling', 'DeiTModel', 'DeiTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFDeiTForImageClassification', 'TFDeiTForImageClassificationWithTeacher', 'TFDeiTForMaskedImageModeling', 'TFDeiTModel', 'TFDeiTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_deit import DeiTFeatureExtractor from .image_processing_deit import DeiTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_deit import ( DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, DeiTForImageClassification, DeiTForImageClassificationWithTeacher, DeiTForMaskedImageModeling, DeiTModel, DeiTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_deit import ( TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, TFDeiTPreTrainedModel, ) else: import sys _lowerCAmelCase :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
263
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) _lowerCAmelCase :List[str] = '▁' _lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'} _lowerCAmelCase :List[Any] = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } _lowerCAmelCase :Tuple = { 'xlm-roberta-base': 512, 'xlm-roberta-large': 512, 'xlm-roberta-large-finetuned-conll02-dutch': 512, 'xlm-roberta-large-finetuned-conll02-spanish': 512, 'xlm-roberta-large-finetuned-conll03-english': 512, 'xlm-roberta-large-finetuned-conll03-german': 512, } class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =['''input_ids''', '''attention_mask'''] def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token _UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , ) _UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) _UpperCAmelCase : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCAmelCase : Any = 1 _UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset _UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Union[str, Any]: _UpperCAmelCase : Tuple = self.__dict__.copy() _UpperCAmelCase : List[str] = None _UpperCAmelCase : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , A ) -> Optional[int]: _UpperCAmelCase : Optional[int] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self , A , A = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase : Any = [self.cls_token_id] _UpperCAmelCase : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : Dict = [self.sep_token_id] _UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCAmelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , A ) -> List[str]: return self.sp_model.encode(A , out_type=A ) def __lowerCAmelCase ( self , A ) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCAmelCase : Any = self.sp_model.PieceToId(A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self , A ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self , A ) -> int: _UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip() return out_string def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: if not os.path.isdir(A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase : List[Any] = os.path.join( A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A ) elif not os.path.isfile(self.vocab_file ): with open(A , '''wb''' ) as fi: _UpperCAmelCase : str = self.sp_model.serialized_model_proto() fi.write(A ) return 
(out_vocab_file,)
263
1
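The fairseq/SentencePiece alignment in the tokenizer above reduces to a fixed table for the first four special tokens plus a constant offset of 1 for every other piece. A toy, self-contained illustration of that mapping (the piece ids below are invented, not from a real SentencePiece model):

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # spm id 0 (<unk>) is re-mapped; everything else shifts by 1


def token_to_id(token, spm_piece_to_id):
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id.get(token, 0)
    # spm id 0 means "unknown" in SentencePiece, so fall back to <unk>
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


print(token_to_id(",", {",": 3}))  # 4, matching the fairseq column of the comment table
print(token_to_id("<s>", {}))      # 0, taken from the fixed special-token table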
"""simple docstring""" from copy import deepcopy import torch import torch.nn.functional as F from torch.optim import AdamW from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from accelerate.accelerator import Accelerator from accelerate.state import GradientState from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import DistributedType, is_torch_version, set_seed def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple ): for param, grad_param in zip(model_a.parameters() , model_b.parameters() ): if not param.requires_grad: continue if not did_step: # Grads should not be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , grad_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})' def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=True ): model.train() _UpperCAmelCase : Union[str, Any] = model(UpperCamelCase__ ) _UpperCAmelCase : Dict = F.mse_loss(UpperCamelCase__ , target.to(output.device ) ) if not do_backward: loss /= accelerator.gradient_accumulation_steps loss.backward() else: accelerator.backward(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : Optional[Any]=False ): set_seed(42 ) _UpperCAmelCase : Any = RegressionModel() _UpperCAmelCase : List[str] = deepcopy(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = RegressionDataset(length=80 ) _UpperCAmelCase : Optional[Any] = DataLoader(UpperCamelCase__ , batch_size=16 ) model.to(accelerator.device ) if sched: _UpperCAmelCase : Optional[Any] = AdamW(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase : Optional[int] = AdamW(params=ddp_model.parameters() , lr=1E-3 ) _UpperCAmelCase : Union[str, Any] = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.65 ) _UpperCAmelCase : Any = LambdaLR(UpperCamelCase__ , lr_lambda=lambda UpperCamelCase__ : epoch**0.65 ) # Make a copy of `model` if sched: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: _UpperCAmelCase , _UpperCAmelCase : Dict = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) if sched: return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched) return model, ddp_model, dataloader def lowerCamelCase_ (UpperCamelCase__ : List[Any] ): # Test when on a single CPU or GPU that the context manager does nothing _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = get_training_setup(UpperCamelCase__ ) # Use a single batch _UpperCAmelCase , _UpperCAmelCase : str = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _UpperCAmelCase , _UpperCAmelCase : int = accelerator.gather((ddp_input, ddp_target) ) _UpperCAmelCase , _UpperCAmelCase : List[str] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , 
UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue assert torch.allclose( param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _UpperCAmelCase : Union[str, Any] = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def lowerCamelCase_ (UpperCamelCase__ : Dict ): # Test on distributed setup that context manager behaves properly _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = get_training_setup(UpperCamelCase__ ) # Use a single batch _UpperCAmelCase , _UpperCAmelCase : Optional[int] = next(iter(UpperCamelCase__ ) ).values() for iteration in range(3 ): # Gather the distributed inputs and targs for the base model _UpperCAmelCase , _UpperCAmelCase : Tuple = accelerator.gather((ddp_input, ddp_target) ) _UpperCAmelCase , _UpperCAmelCase : Any = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) if iteration % 2 == 0: # Accumulate grads locally with accelerator.no_sync(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: # Sync grads step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if iteration % 2 == 0: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' else: # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _UpperCAmelCase : Tuple = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] def lowerCamelCase_ (UpperCamelCase__ : List[str]=False , UpperCamelCase__ : str=False ): _UpperCAmelCase : List[Any] = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = get_training_setup(UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): _UpperCAmelCase , _UpperCAmelCase : Dict = batch.values() # Gather the distributed inputs and targs for the base model _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = accelerator.gather((ddp_input, ddp_target) ) _UpperCAmelCase , _UpperCAmelCase : Dict = input.to(accelerator.device ), 
target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Do "gradient accumulation" (noop) with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # DDP model and model should only be in sync when not (iteration % 2 == 0) for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ): if not param.requires_grad: continue if ((iteration + 1) % 2 == 0) or (iteration == len(UpperCamelCase__ ) - 1): # Grads should be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is True ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})' else: # Grads should not be in sync assert ( torch.allclose(param.grad , ddp_param.grad ) is False ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})' # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) _UpperCAmelCase : str = ddp_input[torch.randperm(len(UpperCamelCase__ ) )] GradientState._reset_state() def lowerCamelCase_ (UpperCamelCase__ : List[str]=False , UpperCamelCase__ : Union[str, Any]=False ): _UpperCAmelCase : List[str] = Accelerator( split_batches=UpperCamelCase__ , dispatch_batches=UpperCamelCase__ , gradient_accumulation_steps=2 ) # Test that context manager behaves properly _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = get_training_setup(UpperCamelCase__ , UpperCamelCase__ ) for iteration, batch in enumerate(UpperCamelCase__ ): _UpperCAmelCase , _UpperCAmelCase : str = batch.values() # Gather the distributed inputs and targs for the base model _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.gather((ddp_input, ddp_target) ) _UpperCAmelCase , _UpperCAmelCase : List[str] = input.to(accelerator.device ), target.to(accelerator.device ) # Perform our initial ground truth step in non "DDP" model.train() ddp_model.train() step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) opt.step() if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(UpperCamelCase__ )): if split_batches: sched.step() else: for _ in range(accelerator.num_processes ): sched.step() opt.zero_grad() # Perform gradient accumulation under wrapper with accelerator.accumulate(UpperCamelCase__ ): step_model(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ddp_opt.step() ddp_sched.step() ddp_opt.zero_grad() # Learning rates should be the same assert ( opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"] ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n' _UpperCAmelCase : List[Any] = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(UpperCamelCase__ )) if accelerator.num_processes > 1: check_model_parameters(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Shuffle ddp_input on each iteration torch.manual_seed(1337 + iteration ) GradientState._reset_state() def lowerCamelCase_ (): _UpperCAmelCase : List[str] = Accelerator() _UpperCAmelCase : Union[str, Any] = RegressionDataset(length=80 ) _UpperCAmelCase : Any = DataLoader(UpperCamelCase__ , batch_size=16 ) _UpperCAmelCase : List[Any] = 
RegressionDataset(length=96 ) _UpperCAmelCase : Union[str, Any] = DataLoader(UpperCamelCase__ , batch_size=16 ) _UpperCAmelCase , _UpperCAmelCase : List[str] = accelerator.prepare(UpperCamelCase__ , UpperCamelCase__ ) assert accelerator.gradient_state.active_dataloader is None for iteration, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if iteration < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader if iteration == 1: for batch_num, _ in enumerate(UpperCamelCase__ ): assert id(accelerator.gradient_state.active_dataloader ) == id(UpperCamelCase__ ) if batch_num < len(UpperCamelCase__ ) - 1: assert not accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader else: assert accelerator.gradient_state.end_of_dataloader assert accelerator.gradient_state.active_dataloader is None def lowerCamelCase_ (): _UpperCAmelCase : List[Any] = Accelerator() _UpperCAmelCase : str = accelerator.state if state.local_process_index == 0: print('''**Test `accumulate` gradient accumulation with dataloader break**''' ) test_dataloader_break() if state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print('''**Test NOOP `no_sync` context manager**''' ) test_noop_sync(UpperCamelCase__ ) if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU): if state.local_process_index == 0: print('''**Test Distributed `no_sync` context manager**''' ) test_distributed_sync(UpperCamelCase__ ) if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation, ''' , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation(UpperCamelCase__ , UpperCamelCase__ ) # Currently will break on torch 2.0 +, need to investigate why if is_torch_version('''<''' , '''2.0''' ) or state.distributed_type == DistributedType.NO: if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , '''`split_batches=False`, `dispatch_batches=False`**''' , ) test_gradient_accumulation_with_opt_and_scheduler() if state.distributed_type == DistributedType.MULTI_GPU: for split_batch in [True, False]: for dispatch_batches in [True, False]: if not split_batch and not dispatch_batches: continue if state.local_process_index == 0: print( '''**Test `accumulate` gradient accumulation with optimizer and scheduler, ''' , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , ) test_gradient_accumulation_with_opt_and_scheduler(UpperCamelCase__ , UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
263
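In user code, the pattern these tests verify collapses to a few lines. A minimal sketch, assuming a toy model and dataloader (all names below are placeholders, not parts of the test file):

import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
dataset = TensorDataset(torch.randn(32, 4), torch.randn(32, 1))
dataloader = DataLoader(dataset, batch_size=8)
model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)

for inputs, targets in dataloader:
    # Inside `accumulate`, gradient sync and the divide-by-accumulation-steps
    # bookkeeping are handled for us, which is what the tests above assert.
    with accelerator.accumulate(model):
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        accelerator.backward(loss)
        optimizer.step()
        optimizer.zero_grad()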
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , *A , **A ) -> None: warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , A , ) super().__init__(*A , **A )
263
1
"""simple docstring""" import unittest from knapsack import knapsack as k class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Any = 0 _UpperCAmelCase : Dict = [0] _UpperCAmelCase : Tuple = [0] _UpperCAmelCase : Union[str, Any] = len(A ) self.assertEqual(k.knapsack(A , A , A , A ) , 0 ) _UpperCAmelCase : Union[str, Any] = [6_0] _UpperCAmelCase : int = [1_0] _UpperCAmelCase : Union[str, Any] = len(A ) self.assertEqual(k.knapsack(A , A , A , A ) , 0 ) def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Tuple = 3 _UpperCAmelCase : Tuple = [1, 2, 3] _UpperCAmelCase : List[str] = [3, 2, 1] _UpperCAmelCase : Optional[int] = len(A ) self.assertEqual(k.knapsack(A , A , A , A ) , 5 ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = 5_0 _UpperCAmelCase : List[str] = [6_0, 1_0_0, 1_2_0] _UpperCAmelCase : Optional[int] = [1_0, 2_0, 3_0] _UpperCAmelCase : List[str] = len(A ) self.assertEqual(k.knapsack(A , A , A , A ) , 2_2_0 ) if __name__ == "__main__": unittest.main()
263
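The `knapsack` module imported above is not shown here; a recursive 0/1 knapsack with the call signature these tests use, `knapsack(capacity, weights, values, counter)`, might look roughly like this (a sketch consistent with the expected outputs, not the actual imported implementation):

def knapsack(capacity, weights, values, counter):
    # Base case: no items left or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # If the last item is too heavy, it cannot be included.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding it.
    return max(
        values[counter - 1] + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )


# Matches the expectations in the tests above:
assert knapsack(3, [3, 2, 1], [1, 2, 3], 3) == 5
assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220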
"""simple docstring""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ): # Load configuration defined in the metadata file with open(UpperCamelCase__ ) as metadata_file: _UpperCAmelCase : Dict = json.load(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' ) # Load the entity vocab file _UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens _UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.' _UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name] _UpperCAmelCase : Tuple = state_dict[prefix + matrix_name] _UpperCAmelCase : str = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']] _UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval() _UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. 
Expected only missing embeddings.position_ids' ) if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )): raise ValueError( '''Unexpected keys''' F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' ) # Check outputs _UpperCAmelCase : Optional[int] = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' ) _UpperCAmelCase : List[str] = ( '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the''' ''' new world number one avoid a humiliating second- round exit at Wimbledon .''' ) _UpperCAmelCase : Dict = (39, 42) _UpperCAmelCase : Any = tokenizer(UpperCamelCase__ , entity_spans=[span] , add_prefix_space=UpperCamelCase__ , return_tensors='''pt''' ) _UpperCAmelCase : List[Any] = model(**UpperCamelCase__ ) # Verify word hidden states if model_size == "large": _UpperCAmelCase : str = torch.Size((1, 42, 1024) ) _UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] ) else: # base _UpperCAmelCase : Optional[Any] = torch.Size((1, 42, 768) ) _UpperCAmelCase : str = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] ) if not (outputs.last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' ) if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ): raise ValueError # Verify entity hidden states if model_size == "large": _UpperCAmelCase : int = torch.Size((1, 1, 1024) ) _UpperCAmelCase : str = torch.tensor([[0.0466, -0.0106, -0.0179]] ) else: # base _UpperCAmelCase : List[str] = torch.Size((1, 1, 768) ) _UpperCAmelCase : List[Any] = torch.tensor([[0.1457, 0.1044, 0.0174]] ) if not (outputs.entity_last_hidden_state.shape == expected_shape): raise ValueError( F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is' F' {expected_shape}' ) if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ): raise ValueError # Finally, save our PyTorch model and tokenizer print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) ) model.save_pretrained(UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] ): _UpperCAmelCase : Any = {} with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f: for index, line in enumerate(UpperCamelCase__ ): _UpperCAmelCase , _UpperCAmelCase : Any = line.rstrip().split('''\t''' ) _UpperCAmelCase : Tuple = index return entity_vocab if __name__ == "__main__": _lowerCAmelCase :List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.') parser.add_argument( '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.' ) parser.add_argument( '--entity_vocab_path', default=None, type=str, help='Path to an entity_vocab.tsv file, containing the entity vocabulary.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.' ) parser.add_argument( '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.' 
) _lowerCAmelCase :Any = parser.parse_args() convert_luke_checkpoint( args.checkpoint_path, args.metadata_path, args.entity_vocab_path, args.pytorch_dump_folder_path, args.model_size, )
263
1
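`load_entity_vocab` above expects one tab-separated pair per line, unpacked as `title, _ = line.rstrip().split("\t")` (the second field is ignored), with the entity id given by the line index. A self-contained sketch with invented entity names:

# Hedged sketch of the entity_vocab.tsv format the loader expects; the
# entity names and counts below are made up for illustration.
example = "[MASK]\t0\nAna Ivanovic\t12\n"

entity_vocab = {}
for index, line in enumerate(example.splitlines()):
    title, _ = line.rstrip().split("\t")
    entity_vocab[title] = index

print(entity_vocab)  # {'[MASK]': 0, 'Ana Ivanovic': 1}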
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() _lowerCAmelCase :Optional[Any] = logging.get_logger(__name__) def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : Any = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: _UpperCAmelCase : Optional[Any] = 192 _UpperCAmelCase : int = 768 _UpperCAmelCase : List[Any] = 12 _UpperCAmelCase : Dict = 3 _UpperCAmelCase : Any = [800, 1333] _UpperCAmelCase : Union[str, Any] = False elif yolos_name == "yolos_s_dWr": _UpperCAmelCase : str = 330 _UpperCAmelCase : Optional[Any] = 14 _UpperCAmelCase : int = 6 _UpperCAmelCase : Optional[int] = 1320 elif "yolos_s" in yolos_name: _UpperCAmelCase : Any = 384 _UpperCAmelCase : Dict = 1536 _UpperCAmelCase : Any = 12 _UpperCAmelCase : Optional[Any] = 6 elif "yolos_b" in yolos_name: _UpperCAmelCase : List[Any] = [800, 1344] _UpperCAmelCase : Dict = 91 _UpperCAmelCase : Optional[int] = '''huggingface/label-files''' _UpperCAmelCase : str = '''coco-detection-id2label.json''' _UpperCAmelCase : Optional[int] = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='''dataset''' ) , '''r''' ) ) _UpperCAmelCase : Dict = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} _UpperCAmelCase : Any = idalabel _UpperCAmelCase : Dict = {v: k for k, v in idalabel.items()} return config def lowerCamelCase_ (UpperCamelCase__ : dict , UpperCamelCase__ : YolosConfig , UpperCamelCase__ : bool = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _UpperCAmelCase : List[Any] = state_dict.pop(F'blocks.{i}.attn.qkv.weight' ) _UpperCAmelCase : str = state_dict.pop(F'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict _UpperCAmelCase : Tuple = in_proj_weight[: config.hidden_size, :] _UpperCAmelCase : Any = in_proj_bias[: config.hidden_size] _UpperCAmelCase : Optional[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _UpperCAmelCase : List[Any] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _UpperCAmelCase : Optional[int] = in_proj_weight[-config.hidden_size :, :] _UpperCAmelCase : int = in_proj_bias[-config.hidden_size :] def lowerCamelCase_ (UpperCamelCase__ : str ): if "backbone" in name: _UpperCAmelCase : int = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: _UpperCAmelCase : int = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: _UpperCAmelCase : Optional[int] = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: _UpperCAmelCase : str = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: _UpperCAmelCase : Optional[Any] = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: _UpperCAmelCase : List[str] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: _UpperCAmelCase : Tuple = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: _UpperCAmelCase : int = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: _UpperCAmelCase : Dict = 
name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: _UpperCAmelCase : Any = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: _UpperCAmelCase : Optional[Any] = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: _UpperCAmelCase : Any = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: _UpperCAmelCase : str = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: _UpperCAmelCase : str = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: _UpperCAmelCase : List[str] = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if "vit.norm" in name: _UpperCAmelCase : Any = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def lowerCamelCase_ (UpperCamelCase__ : dict , UpperCamelCase__ : YolosForObjectDetection ): for key in orig_state_dict.copy().keys(): _UpperCAmelCase : List[str] = orig_state_dict.pop(UpperCamelCase__ ) if "qkv" in key: _UpperCAmelCase : Tuple = key.split('''.''' ) _UpperCAmelCase : Dict = int(key_split[2] ) _UpperCAmelCase : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: _UpperCAmelCase : List[Any] = val[:dim, :] _UpperCAmelCase : Optional[Any] = val[ dim : dim * 2, : ] _UpperCAmelCase : Any = val[-dim:, :] else: _UpperCAmelCase : Tuple = val[:dim] _UpperCAmelCase : Union[str, Any] = val[dim : dim * 2] _UpperCAmelCase : Optional[int] = val[-dim:] else: _UpperCAmelCase : Optional[Any] = val return orig_state_dict def lowerCamelCase_ (): _UpperCAmelCase : str = '''http://images.cocodataset.org/val2017/000000039769.jpg''' _UpperCAmelCase : Optional[Any] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw ) return im @torch.no_grad() def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : bool = False ): _UpperCAmelCase : Any = get_yolos_config(UpperCamelCase__ ) # load original state_dict _UpperCAmelCase : Dict = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model'''] # load 🤗 model _UpperCAmelCase : str = YolosForObjectDetection(UpperCamelCase__ ) model.eval() _UpperCAmelCase : Union[str, Any] = convert_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ ) # Check outputs on an image, prepared by YolosImageProcessor _UpperCAmelCase : Union[str, Any] = 800 if yolos_name != '''yolos_ti''' else 512 _UpperCAmelCase : Tuple = YolosImageProcessor(format='''coco_detection''' , size=UpperCamelCase__ ) _UpperCAmelCase : int = image_processor(images=prepare_img() , return_tensors='''pt''' ) _UpperCAmelCase : Dict = model(**UpperCamelCase__ ) _UpperCAmelCase , _UpperCAmelCase : List[str] = outputs.logits, outputs.pred_boxes _UpperCAmelCase , _UpperCAmelCase : List[str] = None, None if yolos_name == "yolos_ti": _UpperCAmelCase : str = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) _UpperCAmelCase : Any = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": _UpperCAmelCase : Optional[int] = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) _UpperCAmelCase : Union[str, Any] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": _UpperCAmelCase : Union[str, Any] = 
torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) _UpperCAmelCase : int = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": _UpperCAmelCase : Optional[int] = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) _UpperCAmelCase : List[Any] = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": _UpperCAmelCase : Dict = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) _UpperCAmelCase : Optional[int] = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(F'Unknown yolos_name: {yolos_name}' ) assert torch.allclose(logits[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1E-4 ) Path(UpperCamelCase__ ).mkdir(exist_ok=UpperCamelCase__ ) print(F'Saving model {yolos_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(UpperCamelCase__ ) print(F'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(UpperCamelCase__ ) if push_to_hub: _UpperCAmelCase : List[str] = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) _UpperCAmelCase : Optional[Any] = model_mapping[yolos_name] image_processor.push_to_hub(UpperCamelCase__ , organization='''hustvl''' ) model.push_to_hub(UpperCamelCase__ , organization='''hustvl''' ) if __name__ == "__main__": _lowerCAmelCase :Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) _lowerCAmelCase :int = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
263
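The q/k/v read-in above splits timm's fused qkv projection into three row slices of equal height. The same slicing on a dummy tensor, to make the layout concrete (the toy hidden size is an illustration only):

import torch

hidden_size = 4  # toy value; the fused weight has 3 * hidden_size rows
in_proj_weight = torch.arange(12 * hidden_size, dtype=torch.float32).reshape(12, hidden_size)

query = in_proj_weight[:hidden_size, :]                 # rows 0..3
key = in_proj_weight[hidden_size : hidden_size * 2, :]  # rows 4..7
value = in_proj_weight[-hidden_size:, :]                # rows 8..11

assert torch.equal(torch.cat([query, key, value]), in_proj_weight)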
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _lowerCAmelCase :str = object() # For specifying empty leaf dict `{}` _lowerCAmelCase :str = object() def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ): _UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): _UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def lowerCamelCase_ (UpperCamelCase__ : List[str] ): def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def lowerCamelCase_ (): return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : List[str] = _get_partition_rules() _UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} _UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
263
1
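A small, self-contained illustration of `set_partitions` above, assuming a JAX/Flax version where `jax.experimental.PartitionSpec` still exists; the parameter tree is invented, only the rule patterns come from the module:

# Invented toy parameter tree; values are irrelevant to the matching.
params = {"transformer": {"h": {"0": {"mlp": {"c_fc": {"kernel": 0, "bias": 0}}}}}}

specs = set_partitions(params)
# The flattened key ("transformer", "h", "0", "mlp", "c_fc", "kernel")
# matches the ("mlp", "c_fc", "kernel") rule as a suffix window,
# so the leaf gets P(None, "mp"); the bias leaf gets P("mp").
print(specs["transformer"]["h"]["0"]["mlp"]["c_fc"]["kernel"])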
"""simple docstring""" import json import os import unittest from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( a ,unittest.TestCase ): '''simple docstring''' a__ =GPTaTokenizer a__ =GPTaTokenizerFast a__ =True a__ ={'''add_prefix_space''': True} a__ =False def __lowerCAmelCase ( self ) -> Optional[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt _UpperCAmelCase : Union[str, Any] = [ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''\u0120''', '''\u0120l''', '''\u0120n''', '''\u0120lo''', '''\u0120low''', '''er''', '''\u0120lowest''', '''\u0120newer''', '''\u0120wider''', '''<unk>''', '''<|endoftext|>''', ] _UpperCAmelCase : List[str] = dict(zip(A , range(len(A ) ) ) ) _UpperCAmelCase : int = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', ''''''] _UpperCAmelCase : Dict = {'''unk_token''': '''<unk>'''} _UpperCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) _UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(A ) ) def __lowerCAmelCase ( self , **A ) -> Dict: kwargs.update(self.special_tokens_map ) return GPTaTokenizer.from_pretrained(self.tmpdirname , **A ) def __lowerCAmelCase ( self , **A ) -> List[str]: kwargs.update(self.special_tokens_map ) return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **A ) def __lowerCAmelCase ( self , A ) -> List[Any]: _UpperCAmelCase : List[Any] = '''lower newer''' _UpperCAmelCase : List[str] = '''lower newer''' return input_text, output_text def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Union[str, Any] = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) _UpperCAmelCase : Optional[Any] = '''lower newer''' _UpperCAmelCase : str = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er'''] _UpperCAmelCase : List[str] = tokenizer.tokenize(A , add_prefix_space=A ) self.assertListEqual(A , A ) _UpperCAmelCase : Tuple = tokens + [tokenizer.unk_token] _UpperCAmelCase : Optional[int] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def __lowerCAmelCase ( self ) -> str: if not self.test_rust_tokenizer: return _UpperCAmelCase : Union[str, Any] = self.get_tokenizer() _UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer(add_prefix_space=A ) _UpperCAmelCase : Any = '''lower newer''' # Testing tokenization _UpperCAmelCase : int = tokenizer.tokenize(A , add_prefix_space=A ) _UpperCAmelCase : List[str] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) # Testing conversion to ids without special tokens _UpperCAmelCase : Optional[int] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) _UpperCAmelCase : List[str] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) # Testing conversion to ids with special tokens _UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=A ) _UpperCAmelCase : List[str] = tokenizer.encode(A , 
add_prefix_space=A ) _UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) # Testing the unknown token _UpperCAmelCase : List[str] = tokens + [rust_tokenizer.unk_token] _UpperCAmelCase : List[str] = [1_4, 1_5, 1_0, 9, 3, 2, 1_5, 1_9] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(A ) , A ) def __lowerCAmelCase ( self , *A , **A ) -> Tuple: # It's very difficult to mix/test pretokenization with byte-level # And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string) pass def __lowerCAmelCase ( self , A=1_5 ) -> List[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ): _UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(A , **A ) # Simple input _UpperCAmelCase : Optional[int] = '''This is a simple input''' _UpperCAmelCase : Optional[Any] = ['''This is a simple input 1''', '''This is a simple input 2'''] _UpperCAmelCase : Dict = ('''This is a simple input''', '''This is a pair''') _UpperCAmelCase : Optional[int] = [ ('''This is a simple input 1''', '''This is a simple input 2'''), ('''This is a simple pair 1''', '''This is a simple pair 2'''), ] # Simple input tests self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' ) # Simple input self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' ) # Simple input self.assertRaises( A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , ) # Pair input self.assertRaises(A , tokenizer_r.encode , A , max_length=A , padding='''max_length''' ) # Pair input self.assertRaises(A , tokenizer_r.encode_plus , A , max_length=A , padding='''max_length''' ) # Pair input self.assertRaises( A , tokenizer_r.batch_encode_plus , A , max_length=A , padding='''max_length''' , ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : str = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' ) # Simple input _UpperCAmelCase : Optional[int] = '''This is a simple input''' _UpperCAmelCase : List[str] = ['''This is a simple input looooooooong''', '''This is a simple input'''] _UpperCAmelCase : List[Any] = ('''This is a simple input''', '''This is a pair''') _UpperCAmelCase : List[str] = [ ('''This is a simple input loooooong''', '''This is a simple input'''), ('''This is a simple pair loooooong''', '''This is a simple pair'''), ] _UpperCAmelCase : int = tokenizer.pad_token_id _UpperCAmelCase : str = tokenizer(A , padding='''max_length''' , max_length=3_0 , return_tensors='''np''' ) _UpperCAmelCase : List[str] = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' ) _UpperCAmelCase : List[str] = tokenizer(*A , padding='''max_length''' , max_length=6_0 , return_tensors='''np''' ) _UpperCAmelCase : Any = tokenizer(A , padding=A , truncate=A , return_tensors='''np''' ) # s # test single string max_length padding self.assertEqual(out_s['''input_ids'''].shape[-1] , 3_0 ) self.assertTrue(pad_token_id in out_s['''input_ids'''] ) self.assertTrue(0 in out_s['''attention_mask'''] ) # s2 # test automatic padding self.assertEqual(out_sa['''input_ids'''].shape[-1] , 3_3 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] ) self.assertFalse(0 in out_sa['''attention_mask'''][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] ) self.assertTrue(0 in 
out_sa['''attention_mask'''][1] ) # p # test single pair max_length padding self.assertEqual(out_p['''input_ids'''].shape[-1] , 6_0 ) self.assertTrue(pad_token_id in out_p['''input_ids'''] ) self.assertTrue(0 in out_p['''attention_mask'''] ) # p2 # test automatic padding pair self.assertEqual(out_pa['''input_ids'''].shape[-1] , 5_2 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] ) self.assertFalse(0 in out_pa['''attention_mask'''][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] ) self.assertTrue(0 in out_pa['''attention_mask'''][1] ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : Tuple = '''$$$''' _UpperCAmelCase : Tuple = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=A , add_bos_token=A ) _UpperCAmelCase : Optional[Any] = '''This is a simple input''' _UpperCAmelCase : Tuple = ['''This is a simple input 1''', '''This is a simple input 2'''] _UpperCAmelCase : str = tokenizer.bos_token_id _UpperCAmelCase : List[Any] = tokenizer(A ) _UpperCAmelCase : Union[str, Any] = tokenizer(A ) self.assertEqual(out_s.input_ids[0] , A ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) _UpperCAmelCase : Dict = tokenizer.decode(out_s.input_ids ) _UpperCAmelCase : Tuple = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] , A ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) def __lowerCAmelCase ( self ) -> Tuple: pass def __lowerCAmelCase ( self ) -> List[Any]: # TODO: change to self.get_tokenizers() when the fast version is implemented _UpperCAmelCase : Any = [self.get_tokenizer(do_lower_case=A , add_bos_token=A )] for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): _UpperCAmelCase : List[str] = '''Encode this.''' _UpperCAmelCase : List[Any] = '''This one too please.''' _UpperCAmelCase : str = tokenizer.encode(A , add_special_tokens=A ) encoded_sequence += tokenizer.encode(A , add_special_tokens=A ) _UpperCAmelCase : List[Any] = tokenizer.encode_plus( A , A , add_special_tokens=A , return_special_tokens_mask=A , ) _UpperCAmelCase : Optional[int] = encoded_sequence_dict['''input_ids'''] _UpperCAmelCase : str = encoded_sequence_dict['''special_tokens_mask'''] self.assertEqual(len(A ) , len(A ) ) _UpperCAmelCase : Union[str, Any] = [ (x if not special_tokens_mask[i] else None) for i, x in enumerate(A ) ] _UpperCAmelCase : Optional[int] = [x for x in filtered_sequence if x is not None] self.assertEqual(A , A ) @require_tokenizers class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> int: # More context: # https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1 # https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519 # https://github.com/huggingface/transformers/pull/17088#discussion_r871246439 _UpperCAmelCase : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A ) _UpperCAmelCase : Tuple = '''A photo of a cat''' _UpperCAmelCase : str = tokenizer.encode( A , ) self.assertEqual(A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('''test_opt''' ) _UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('''./test_opt''' ) _UpperCAmelCase : Dict = tokenizer.encode( A , ) self.assertEqual(A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=A ) 
_UpperCAmelCase : Optional[Any] = '''A photo of a cat''' _UpperCAmelCase : Union[str, Any] = tokenizer.encode( A , ) # Same as above self.assertEqual(A , [2, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) @unittest.skip('''This test is failing because of a bug in the fast tokenizer''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : int = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=A ) _UpperCAmelCase : Optional[Any] = '''bos''' _UpperCAmelCase : Optional[int] = tokenizer.get_vocab()['''bos'''] _UpperCAmelCase : Tuple = '''A photo of a cat''' _UpperCAmelCase : Optional[Any] = tokenizer.encode( A , ) # We changed the bos token self.assertEqual(A , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] ) tokenizer.save_pretrained('''./tok''' ) _UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''./tok''' ) self.assertTrue(tokenizer.is_fast ) _UpperCAmelCase : List[str] = tokenizer.encode( A , ) self.assertEqual(A , [3_1_9_5_7, 2_5_0, 1_3_4_5, 9, 1_0, 4_7_5_8] )
263
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : str = pipeline( task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' ) _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @slow @require_torch def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Union[str, Any] = pipeline( task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , ) # This is an audio of a dog _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ] , ) _UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) _UpperCAmelCase : Tuple = audio_classifier( [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> int: pass
263
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _lowerCAmelCase :Tuple = logging.get_logger(__name__) if is_vision_available(): import PIL class _UpperCAmelCase ( a ): '''simple docstring''' a__ =['''pixel_values'''] def __init__( self , A = True , A = None , A = PILImageResampling.BICUBIC , A = True , A = None , A = True , A = 1 / 2_5_5 , A = True , A = None , A = None , A = True , **A , ) -> None: super().__init__(**A ) _UpperCAmelCase : Optional[Any] = size if size is not None else {'''shortest_edge''': 2_2_4} _UpperCAmelCase : List[str] = get_size_dict(A , default_to_square=A ) _UpperCAmelCase : str = crop_size if crop_size is not None else {'''height''': 2_2_4, '''width''': 2_2_4} _UpperCAmelCase : List[str] = get_size_dict(A , default_to_square=A , param_name='''crop_size''' ) _UpperCAmelCase : Union[str, Any] = do_resize _UpperCAmelCase : int = size _UpperCAmelCase : List[Any] = resample _UpperCAmelCase : Union[str, Any] = do_center_crop _UpperCAmelCase : str = crop_size _UpperCAmelCase : Optional[Any] = do_rescale _UpperCAmelCase : int = rescale_factor _UpperCAmelCase : Union[str, Any] = do_normalize _UpperCAmelCase : Optional[int] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN _UpperCAmelCase : Optional[Any] = image_std if image_std is not None else OPENAI_CLIP_STD _UpperCAmelCase : Optional[int] = do_convert_rgb def __lowerCAmelCase ( self , A , A , A = PILImageResampling.BICUBIC , A = None , **A , ) -> np.ndarray: _UpperCAmelCase : Tuple = get_size_dict(A , default_to_square=A ) if "shortest_edge" not in size: raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' ) _UpperCAmelCase : Union[str, Any] = get_resize_output_image_size(A , size=size['''shortest_edge'''] , default_to_square=A ) return resize(A , size=A , resample=A , data_format=A , **A ) def __lowerCAmelCase ( self , A , A , A = None , **A , ) -> np.ndarray: _UpperCAmelCase : List[Any] = get_size_dict(A ) if "height" not in size or "width" not in size: raise ValueError(f'The `size` parameter must contain the keys (height, width). 
Got {size.keys()}' ) return center_crop(A , size=(size['''height'''], size['''width''']) , data_format=A , **A ) def __lowerCAmelCase ( self , A , A , A = None , **A , ) -> Optional[Any]: return rescale(A , scale=A , data_format=A , **A ) def __lowerCAmelCase ( self , A , A , A , A = None , **A , ) -> np.ndarray: return normalize(A , mean=A , std=A , data_format=A , **A ) def __lowerCAmelCase ( self , A , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = None , A = ChannelDimension.FIRST , **A , ) -> PIL.Image.Image: _UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize _UpperCAmelCase : Optional[int] = size if size is not None else self.size _UpperCAmelCase : Any = get_size_dict(A , param_name='''size''' , default_to_square=A ) _UpperCAmelCase : Optional[int] = resample if resample is not None else self.resample _UpperCAmelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop _UpperCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size _UpperCAmelCase : Dict = get_size_dict(A , param_name='''crop_size''' , default_to_square=A ) _UpperCAmelCase : Any = do_rescale if do_rescale is not None else self.do_rescale _UpperCAmelCase : str = rescale_factor if rescale_factor is not None else self.rescale_factor _UpperCAmelCase : List[str] = do_normalize if do_normalize is not None else self.do_normalize _UpperCAmelCase : str = image_mean if image_mean is not None else self.image_mean _UpperCAmelCase : Optional[int] = image_std if image_std is not None else self.image_std _UpperCAmelCase : Tuple = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb _UpperCAmelCase : Any = make_list_of_images(A ) if not valid_images(A ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: _UpperCAmelCase : int = [convert_to_rgb(A ) for image in images] # All transformations expect numpy arrays. _UpperCAmelCase : str = [to_numpy_array(A ) for image in images] if do_resize: _UpperCAmelCase : str = [self.resize(image=A , size=A , resample=A ) for image in images] if do_center_crop: _UpperCAmelCase : List[str] = [self.center_crop(image=A , size=A ) for image in images] if do_rescale: _UpperCAmelCase : Union[str, Any] = [self.rescale(image=A , scale=A ) for image in images] if do_normalize: _UpperCAmelCase : Any = [self.normalize(image=A , mean=A , std=A ) for image in images] _UpperCAmelCase : int = [to_channel_dimension_format(A , A ) for image in images] _UpperCAmelCase : Any = {'''pixel_values''': images} return BatchFeature(data=A , tensor_type=A )
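# --- A minimal numpy sketch of the transform order implemented above
# (resize -> center-crop -> rescale -> normalize -> channels-first); the
# mean/std values are the OpenAI CLIP constants that OPENAI_CLIP_MEAN and
# OPENAI_CLIP_STD resolve to ---
import numpy as np

CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # already resized/cropped
x = image / 255.0                       # rescale_factor = 1 / 255
x = (x - CLIP_MEAN) / CLIP_STD          # per-channel normalize
x = x.transpose(2, 0, 1)                # HWC -> CHW (ChannelDimension.FIRST)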
263
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _lowerCAmelCase :Tuple = logging.getLogger(__name__) def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ): def get_dataset(UpperCamelCase__ : List[str] ): _UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) _UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ ) _UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 ) _UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ): _UpperCAmelCase : Tuple = [] for epoch in range(UpperCamelCase__ ): # Train quickly model.train() for batch in dataloader: _UpperCAmelCase , _UpperCAmelCase : Dict = batch _UpperCAmelCase : int = model(UpperCamelCase__ ) _UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ ) accelerator.backward(UpperCamelCase__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self ) -> List[Any]: super().__init__() _UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) ) _UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) ) def __lowerCAmelCase ( self , A ) -> Tuple: return x * self.a + self.b class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : int = DummyModel() _UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders() _UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( A , A , A , A ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __lowerCAmelCase ( self ) -> List[str]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : Optional[Any] = DummyModel() _UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders() # Train baseline _UpperCAmelCase : Optional[int] = Accelerator() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : 
List[str] = accelerator.prepare( A , A , A , A ) # Save initial _UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' ) accelerator.save_state(A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item() _UpperCAmelCase : str = optimizer.state_dict() _UpperCAmelCase : Tuple = train(3 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : List[Any] = optimizer.state_dict() # Train partially set_seed(4_2 ) _UpperCAmelCase : Dict = DummyModel() _UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders() _UpperCAmelCase : Tuple = Accelerator() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare( A , A , A , A ) accelerator.load_state(A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : List[str] = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) _UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A ) # Save everything _UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' ) accelerator.save_state(A ) # Load everything back in and make sure all states work accelerator.load_state(A ) test_rands += train(1 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : Dict = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : List[Any] = DummyModel() _UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare( A , A , A , A ) # Save initial accelerator.save_state() ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : Dict = optimizer.state_dict() _UpperCAmelCase : int = train(3 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : Union[str, Any] = optimizer.state_dict() # Train partially set_seed(4_2 ) _UpperCAmelCase : List[Any] = DummyModel() _UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A ) _UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( A , A , A , A ) accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : str = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) _UpperCAmelCase : List[str] = train(2 , A , A , A , A ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work 
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item() _UpperCAmelCase : Tuple = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] ) _UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] ) _UpperCAmelCase : Optional[int] = DummyModel() _UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() ) _UpperCAmelCase : Optional[int] = Accelerator() with self.assertRaises(A ) as ve: accelerator.register_for_checkpointing(A , A , A , A ) _UpperCAmelCase : Dict = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : Tuple = DummyModel() _UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 ) _UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare( A , A , A , A , A ) # Save initial accelerator.save_state() _UpperCAmelCase : List[str] = scheduler.state_dict() train(3 , A , A , A , A , A ) self.assertNotEqual(A , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(A , scheduler.state_dict() ) def __lowerCAmelCase ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : int = DummyModel() _UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 ) # Train baseline _UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase : Optional[Any] = accelerator.prepare(A ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(A , env=os.environ.copy() ) if __name__ == "__main__": _lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing' _lowerCAmelCase :Any = DummyModel() _lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3) _lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders() _lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, 
project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the initial optimizer is loaded on the GPU for group in optimizer.param_groups: _lowerCAmelCase :int = group['params'][0].device break assert param_device.type == accelerator.device.type _lowerCAmelCase :Dict = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: _lowerCAmelCase :List[Any] = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: _lowerCAmelCase :Union[str, Any] = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
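# --- A minimal save/restore round-trip of the kind the tests above exercise
# (a sketch, not part of the test file; assumes `accelerate` and PyTorch are
# installed) ---
import torch
from accelerate import Accelerator

model = torch.nn.Linear(1, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
accelerator = Accelerator()
model, optimizer = accelerator.prepare(model, optimizer)
accelerator.save_state("ckpt")   # writes model, optimizer and RNG state under ./ckpt
accelerator.load_state("ckpt")   # restores exactly that state in place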
263
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :Dict = logging.get_logger(__name__) _lowerCAmelCase :Optional[Any] = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'} class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''ctrl''' a__ =['''past_key_values'''] a__ ={ '''max_position_embeddings''': '''n_positions''', '''hidden_size''': '''n_embd''', '''num_attention_heads''': '''n_head''', '''num_hidden_layers''': '''n_layer''', } def __init__( self , A=2_4_6_5_3_4 , A=2_5_6 , A=1_2_8_0 , A=8_1_9_2 , A=4_8 , A=1_6 , A=0.1 , A=0.1 , A=1E-6 , A=0.02 , A=True , **A , ) -> str: _UpperCAmelCase : List[str] = vocab_size _UpperCAmelCase : Dict = n_positions _UpperCAmelCase : Dict = n_embd _UpperCAmelCase : List[Any] = n_layer _UpperCAmelCase : Dict = n_head _UpperCAmelCase : List[str] = dff _UpperCAmelCase : List[str] = resid_pdrop _UpperCAmelCase : int = embd_pdrop _UpperCAmelCase : Dict = layer_norm_epsilon _UpperCAmelCase : Union[str, Any] = initializer_range _UpperCAmelCase : Optional[Any] = use_cache super().__init__(**A )
263
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCAmelCase :str = { 'configuration_squeezebert': [ 'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SqueezeBertConfig', 'SqueezeBertOnnxConfig', ], 'tokenization_squeezebert': ['SqueezeBertTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Optional[int] = ['SqueezeBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :str = [ 'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'SqueezeBertForMaskedLM', 'SqueezeBertForMultipleChoice', 'SqueezeBertForQuestionAnswering', 'SqueezeBertForSequenceClassification', 'SqueezeBertForTokenClassification', 'SqueezeBertModel', 'SqueezeBertModule', 'SqueezeBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_squeezebert import ( SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertOnnxConfig, ) from .tokenization_squeezebert import SqueezeBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) else: import sys _lowerCAmelCase :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
263
1
"""simple docstring""" import inspect import re from hashlib import shaaaa from typing import Dict, List from .arrow import arrow from .audiofolder import audiofolder from .csv import csv from .imagefolder import imagefolder from .json import json from .pandas import pandas from .parquet import parquet from .sql import sql # noqa F401 from .text import text def lowerCamelCase_ (UpperCamelCase__ : List[str] ): _UpperCAmelCase : Any = [] for line in lines: _UpperCAmelCase : int = re.sub(r'''#.*''' , '''''' , UpperCamelCase__ ) # remove comments if line: filtered_lines.append(UpperCamelCase__ ) _UpperCAmelCase : Dict = '''\n'''.join(UpperCamelCase__ ) # Make a hash from all this code _UpperCAmelCase : Any = full_str.encode('''utf-8''' ) return shaaaa(UpperCamelCase__ ).hexdigest() # get importable module names and hash for caching _lowerCAmelCase :List[str] = { 'csv': (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())), 'json': (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())), 'pandas': (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())), 'parquet': (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())), 'arrow': (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())), 'text': (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())), 'imagefolder': (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())), 'audiofolder': (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())), } # Used to infer the module to use based on the data files extensions _lowerCAmelCase :Optional[Any] = { '.csv': ('csv', {}), '.tsv': ('csv', {'sep': '\t'}), '.json': ('json', {}), '.jsonl': ('json', {}), '.parquet': ('parquet', {}), '.arrow': ('arrow', {}), '.txt': ('text', {}), } _EXTENSION_TO_MODULE.update({ext: ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('imagefolder', {}) for ext in imagefolder.ImageFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext: ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _EXTENSION_TO_MODULE.update({ext.upper(): ('audiofolder', {}) for ext in audiofolder.AudioFolder.EXTENSIONS}) _lowerCAmelCase :str = {'imagefolder', 'audiofolder'} # Used to filter data files based on extensions given a module name _lowerCAmelCase :Dict[str, List[str]] = {} for _ext, (_module, _) in _EXTENSION_TO_MODULE.items(): _MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext) _MODULE_TO_EXTENSIONS["imagefolder"].append('.zip') _MODULE_TO_EXTENSIONS["audiofolder"].append('.zip')
263
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _lowerCAmelCase :List[Any] = {'configuration_opt': ['OPT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'OPTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'OPT_PRETRAINED_MODEL_ARCHIVE_LIST', 'OPTForCausalLM', 'OPTModel', 'OPTPreTrainedModel', 'OPTForSequenceClassification', 'OPTForQuestionAnswering', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Optional[int] = ['TFOPTForCausalLM', 'TFOPTModel', 'TFOPTPreTrainedModel'] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'FlaxOPTForCausalLM', 'FlaxOPTModel', 'FlaxOPTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_opt import ( OPT_PRETRAINED_MODEL_ARCHIVE_LIST, OPTForCausalLM, OPTForQuestionAnswering, OPTForSequenceClassification, OPTModel, OPTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel else: import sys _lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
263
1
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins _lowerCAmelCase :List[str] = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec'] def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ): # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit") for item in items: if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ): continue item.add_marker(pytest.mark.unit ) def lowerCamelCase_ (UpperCamelCase__ : Dict ): config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' ) @pytest.fixture(autouse=UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Any , UpperCamelCase__ : str ): # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why a cache dir per test function does not work? _UpperCAmelCase : Union[str, Any] = tmp_path_factory.getbasetemp() / '''cache''' _UpperCAmelCase : Any = test_hf_cache_home / '''datasets''' _UpperCAmelCase : Dict = test_hf_cache_home / '''metrics''' _UpperCAmelCase : Tuple = test_hf_cache_home / '''modules''' monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(UpperCamelCase__ ) ) monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(UpperCamelCase__ ) ) monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(UpperCamelCase__ ) ) _UpperCAmelCase : Union[str, Any] = test_hf_datasets_cache / '''downloads''' monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(UpperCamelCase__ ) ) _UpperCAmelCase : Union[str, Any] = test_hf_datasets_cache / '''downloads''' / '''extracted''' monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(UpperCamelCase__ ) ) @pytest.fixture(autouse=UpperCamelCase__ , scope='''session''' ) def lowerCamelCase_ (): datasets.disable_progress_bar() @pytest.fixture(autouse=UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : int ): # don't take tests into account when counting downloads monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , UpperCamelCase__ ) @pytest.fixture def lowerCamelCase_ (UpperCamelCase__ : Any ): # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0 # To be removed once SQLAlchemy 2.0 supported monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , UpperCamelCase__ )
263
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =IFImgaImgSuperResolutionPipeline a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) a__ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ) -> List[str]: return self._get_superresolution_dummy_components() def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Any = torch.manual_seed(A ) else: _UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : List[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
263
1
"""simple docstring""" import argparse import collections import torch from flax import traverse_util from tax import checkpoints from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : int , UpperCamelCase__ : List[Any]="attention" ): _UpperCAmelCase : List[str] = params[F'{prefix}/layers_{i}/{layer_name}/key/kernel'] _UpperCAmelCase : str = params[F'{prefix}/layers_{i}/{layer_name}/out/kernel'] _UpperCAmelCase : Tuple = params[F'{prefix}/layers_{i}/{layer_name}/query/kernel'] _UpperCAmelCase : Any = params[F'{prefix}/layers_{i}/{layer_name}/value/kernel'] return k, o, q, v def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : Tuple=False ): if split_mlp_wi: _UpperCAmelCase : Dict = params[F'{prefix}/layers_{i}/mlp/wi_0/kernel'] _UpperCAmelCase : str = params[F'{prefix}/layers_{i}/mlp/wi_1/kernel'] _UpperCAmelCase : Any = (wi_a, wi_a) else: _UpperCAmelCase : Tuple = params[F'{prefix}/layers_{i}/mlp/wi/kernel'] _UpperCAmelCase : Optional[Any] = params[F'{prefix}/layers_{i}/mlp/wo/kernel'] return wi, wo def lowerCamelCase_ (UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ): return params[F'{prefix}/layers_{i}/{layer_name}/scale'] def lowerCamelCase_ (UpperCamelCase__ : dict , *, UpperCamelCase__ : int , UpperCamelCase__ : bool ): _UpperCAmelCase : Any = traverse_util.flatten_dict(variables['''target'''] ) _UpperCAmelCase : str = {'''/'''.join(UpperCamelCase__ ): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi _UpperCAmelCase : Optional[int] = '''encoder/layers_0/mlp/wi_0/kernel''' in old print('''Split MLP:''' , UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = collections.OrderedDict() # Shared embeddings. _UpperCAmelCase : int = old['''token_embedder/embedding'''] # Encoder. for i in range(UpperCamelCase__ ): # Block i, layer 0 (Self Attention). _UpperCAmelCase : Tuple = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''encoder''' , '''pre_attention_layer_norm''' ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , '''encoder''' , '''attention''' ) _UpperCAmelCase : List[str] = layer_norm _UpperCAmelCase : Union[str, Any] = k.T _UpperCAmelCase : str = o.T _UpperCAmelCase : Tuple = q.T _UpperCAmelCase : List[str] = v.T # Block i, layer 1 (MLP). _UpperCAmelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''encoder''' , '''pre_mlp_layer_norm''' ) _UpperCAmelCase , _UpperCAmelCase : Optional[int] = tax_mlp_lookup(UpperCamelCase__ , UpperCamelCase__ , '''encoder''' , UpperCamelCase__ ) _UpperCAmelCase : Tuple = layer_norm if split_mlp_wi: _UpperCAmelCase : List[Any] = wi[0].T _UpperCAmelCase : List[Any] = wi[1].T else: _UpperCAmelCase : Tuple = wi.T _UpperCAmelCase : Union[str, Any] = wo.T _UpperCAmelCase : Optional[int] = old[ '''encoder/relpos_bias/rel_embedding''' ].T _UpperCAmelCase : Tuple = old['''encoder/encoder_norm/scale'''] if not is_encoder_only: # Decoder. for i in range(UpperCamelCase__ ): # Block i, layer 0 (Self Attention). 
_UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''pre_self_attention_layer_norm''' ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Tuple = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''self_attention''' ) _UpperCAmelCase : List[Any] = layer_norm _UpperCAmelCase : List[str] = k.T _UpperCAmelCase : Any = o.T _UpperCAmelCase : Any = q.T _UpperCAmelCase : int = v.T # Block i, layer 1 (Cross Attention). _UpperCAmelCase : Optional[int] = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''pre_cross_attention_layer_norm''' ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''encoder_decoder_attention''' ) _UpperCAmelCase : Dict = layer_norm _UpperCAmelCase : Tuple = k.T _UpperCAmelCase : Optional[int] = o.T _UpperCAmelCase : Optional[Any] = q.T _UpperCAmelCase : Tuple = v.T # Block i, layer 2 (MLP). _UpperCAmelCase : Optional[Any] = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , '''pre_mlp_layer_norm''' ) _UpperCAmelCase , _UpperCAmelCase : str = tax_mlp_lookup(UpperCamelCase__ , UpperCamelCase__ , '''decoder''' , UpperCamelCase__ ) _UpperCAmelCase : Dict = layer_norm if split_mlp_wi: _UpperCAmelCase : Optional[Any] = wi[0].T _UpperCAmelCase : Union[str, Any] = wi[1].T else: _UpperCAmelCase : int = wi.T _UpperCAmelCase : List[Any] = wo.T _UpperCAmelCase : Any = old['''decoder/decoder_norm/scale'''] _UpperCAmelCase : Dict = old[ '''decoder/relpos_bias/rel_embedding''' ].T # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: _UpperCAmelCase : Union[str, Any] = old['''decoder/logits_dense/kernel'''].T return new def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : bool ): _UpperCAmelCase : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] ) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: _UpperCAmelCase : int = state_dict['''shared.weight'''] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: _UpperCAmelCase : Any = state_dict['''shared.weight'''] if "lm_head.weight" not in state_dict: # For old 1.0 models. print('''Using shared word embeddings as lm_head.''' ) _UpperCAmelCase : int = state_dict['''shared.weight'''] return state_dict def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] ): _UpperCAmelCase : int = checkpoints.load_tax_checkpoint(UpperCamelCase__ ) _UpperCAmelCase : int = convert_tax_to_pytorch(UpperCamelCase__ , num_layers=config.num_layers , is_encoder_only=UpperCamelCase__ ) _UpperCAmelCase : str = make_state_dict(UpperCamelCase__ , UpperCamelCase__ ) model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) def lowerCamelCase_ (UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : bool = False ): _UpperCAmelCase : Any = TaConfig.from_json_file(UpperCamelCase__ ) print(F'Building PyTorch model from configuration: {config}' ) # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. 
if is_encoder_only: _UpperCAmelCase : str = TaEncoderModel(UpperCamelCase__ ) else: _UpperCAmelCase : Dict = TaForConditionalGeneration(UpperCamelCase__ ) # Load weights from the T5X checkpoint load_tax_weights_in_ta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # Save the PyTorch model print(F'Save PyTorch model to {pytorch_dump_path}' ) model.save_pretrained(UpperCamelCase__ ) # Verify that we can load the checkpoint. model.from_pretrained(UpperCamelCase__ ) print('''Done''' ) if __name__ == "__main__": _lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Whether the model is an encoder-only (no decoder) model', default=False ) _lowerCAmelCase :Tuple = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only )
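# --- Why the pervasive `.T` transposes in the conversion above: Flax/T5X
# store dense kernels as (in_features, out_features), while torch.nn.Linear
# stores weights as (out_features, in_features). A sketch of the flip ---
import numpy as np

flax_kernel = np.zeros((512, 1024))   # (in, out) as saved by T5X
torch_weight = flax_kernel.T          # (out, in) as expected by nn.Linear
assert torch_weight.shape == (1024, 512)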
263
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int ): if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) _UpperCAmelCase : str = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
263
1
"""simple docstring""" from __future__ import annotations from typing import Any def lowerCamelCase_ (UpperCamelCase__ : list ): if not postfix_notation: return 0 _UpperCAmelCase : Optional[Any] = {'''+''', '''-''', '''*''', '''/'''} _UpperCAmelCase : list[Any] = [] for token in postfix_notation: if token in operations: _UpperCAmelCase , _UpperCAmelCase : Optional[int] = stack.pop(), stack.pop() if token == "+": stack.append(a + b ) elif token == "-": stack.append(a - b ) elif token == "*": stack.append(a * b ) else: if a * b < 0 and a % b != 0: stack.append(a // b + 1 ) else: stack.append(a // b ) else: stack.append(int(UpperCamelCase__ ) ) return stack.pop() if __name__ == "__main__": import doctest doctest.testmod()
263
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ): _UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ ) _UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model'''] _UpperCAmelCase : Optional[Any] = list(state_dict.keys() ) # extract state_dict for VQVAE _UpperCAmelCase : Any = {} _UpperCAmelCase : Any = '''first_stage_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] # extract state_dict for UNetLDM _UpperCAmelCase : Tuple = {} _UpperCAmelCase : int = '''model.diffusion_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] _UpperCAmelCase : List[str] = config.model.params.first_stage_config.params _UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params _UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval() vqvae.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval() unet.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : int = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , ) _UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipeline.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) _lowerCAmelCase :List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
263
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowerCAmelCase :List[Any] = {'configuration_swin': ['SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SwinConfig', 'SwinOnnxConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Dict = [ 'SWIN_PRETRAINED_MODEL_ARCHIVE_LIST', 'SwinForImageClassification', 'SwinForMaskedImageModeling', 'SwinModel', 'SwinPreTrainedModel', 'SwinBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :List[str] = [ 'TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFSwinForImageClassification', 'TFSwinForMaskedImageModeling', 'TFSwinModel', 'TFSwinPreTrainedModel', ] if TYPE_CHECKING: from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_swin import ( SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, SwinBackbone, SwinForImageClassification, SwinForMaskedImageModeling, SwinModel, SwinPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, TFSwinPreTrainedModel, ) else: import sys _lowerCAmelCase :List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
263
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :List[str] = logging.get_logger(__name__) _lowerCAmelCase :Any = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''falcon''' a__ =['''past_key_values'''] def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any: _UpperCAmelCase : int = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A ) _UpperCAmelCase : int = hidden_size if n_embed is None else n_embed _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Optional[int] = layer_norm_epsilon _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : Optional[int] = use_cache _UpperCAmelCase : Any = hidden_dropout _UpperCAmelCase : Dict = attention_dropout _UpperCAmelCase : Any = bos_token_id _UpperCAmelCase : List[Any] = eos_token_id _UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase : Dict = alibi _UpperCAmelCase : Optional[int] = new_decoder_architecture _UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase : Optional[int] = parallel_attn _UpperCAmelCase : Optional[int] = bias super().__init__(bos_token_id=A , eos_token_id=A , **A ) @property def __lowerCAmelCase ( self ) -> List[str]: return self.hidden_size // self.num_attention_heads @property def __lowerCAmelCase ( self ) -> List[Any]: return not self.alibi
263
1
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _lowerCAmelCase :Optional[Any] = False class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A ) _UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = generator.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = '''cyberpunk 2077''' _UpperCAmelCase : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.dual_guided( prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images _UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger ''' _UpperCAmelCase : Tuple = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.text_to_image( prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images _UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images _UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 
2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
263
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowerCAmelCase :int = ['small', 'medium', 'large'] _lowerCAmelCase :int = 'lm_head.decoder.weight' _lowerCAmelCase :Dict = 'lm_head.weight' def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ): _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ ) _UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": _lowerCAmelCase :Dict = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) _lowerCAmelCase :str = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") _lowerCAmelCase :int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
263
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :List[Any] = logging.get_logger(__name__) _lowerCAmelCase :Optional[Any] = { 'Salesforce/blip-vqa-base': 'https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json', 'Salesforce/blip-vqa-capfit-large': ( 'https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-base': ( 'https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json' ), 'Salesforce/blip-image-captioning-large': ( 'https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json' ), 'Salesforce/blip-itm-base-coco': 'https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json', 'Salesforce/blip-itm-large-coco': 'https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json', 'Salesforce/blip-itm-base-flikr': 'https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json', 'Salesforce/blip-itm-large-flikr': ( 'https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json' ), } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''blip_text_model''' def __init__( self , A=3_0_5_2_4 , A=7_6_8 , A=7_6_8 , A=3_0_7_2 , A=7_6_8 , A=1_2 , A=8 , A=5_1_2 , A="gelu" , A=1E-12 , A=0.0 , A=0.0 , A=0.02 , A=3_0_5_2_2 , A=2 , A=0 , A=1_0_2 , A=True , A=True , **A , ) -> List[str]: super().__init__( pad_token_id=A , bos_token_id=A , eos_token_id=A , sep_token_id=A , **A , ) _UpperCAmelCase : Optional[Any] = vocab_size _UpperCAmelCase : Any = hidden_size _UpperCAmelCase : List[Any] = encoder_hidden_size _UpperCAmelCase : int = intermediate_size _UpperCAmelCase : Dict = projection_dim _UpperCAmelCase : Optional[Any] = hidden_dropout_prob _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : int = num_attention_heads _UpperCAmelCase : int = max_position_embeddings _UpperCAmelCase : Optional[Any] = layer_norm_eps _UpperCAmelCase : Optional[Any] = hidden_act _UpperCAmelCase : List[Any] = initializer_range _UpperCAmelCase : Union[str, Any] = attention_probs_dropout_prob _UpperCAmelCase : List[Any] = is_decoder _UpperCAmelCase : Any = use_cache @classmethod def __lowerCAmelCase ( cls , A , **A ) -> "PretrainedConfig": cls._set_token_in_kwargs(A ) _UpperCAmelCase , _UpperCAmelCase : int = cls.get_config_dict(A , **A ) # get the text config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": _UpperCAmelCase : str = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(A , **A ) class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''blip_vision_model''' def __init__( self , A=7_6_8 , A=3_0_7_2 , A=5_1_2 , A=1_2 , A=1_2 , A=3_8_4 , A=1_6 , A="gelu" , A=1E-5 , A=0.0 , A=1E-10 , **A , ) -> Union[str, Any]: super().__init__(**A ) _UpperCAmelCase : Union[str, Any] = hidden_size _UpperCAmelCase : str = intermediate_size _UpperCAmelCase : Optional[int] = projection_dim _UpperCAmelCase : Optional[int] = num_hidden_layers _UpperCAmelCase : Optional[Any] = num_attention_heads _UpperCAmelCase : Optional[int] = patch_size _UpperCAmelCase : str = image_size _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : List[Any] = attention_dropout _UpperCAmelCase : Optional[Any] = layer_norm_eps _UpperCAmelCase : Dict = hidden_act @classmethod def __lowerCAmelCase ( cls , A , **A ) -> "PretrainedConfig": cls._set_token_in_kwargs(A ) _UpperCAmelCase , _UpperCAmelCase : Any = cls.get_config_dict(A , **A ) # get the vision config dict if we are loading from BlipConfig if config_dict.get('''model_type''' ) == "blip": _UpperCAmelCase : Union[str, Any] = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(A , **A ) class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''blip''' a__ =True def __init__( self , A=None , A=None , A=5_1_2 , A=2.6_592 , A=2_5_6 , **A , ) -> Dict: super().__init__(**A ) if text_config is None: _UpperCAmelCase : Union[str, Any] = {} logger.info('''`text_config` is `None`. Initializing the `BlipTextConfig` with default values.''' ) if vision_config is None: _UpperCAmelCase : Optional[int] = {} logger.info('''`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.''' ) _UpperCAmelCase : Optional[Any] = BlipTextConfig(**A ) _UpperCAmelCase : List[Any] = BlipVisionConfig(**A ) _UpperCAmelCase : Tuple = self.vision_config.hidden_size _UpperCAmelCase : Any = projection_dim _UpperCAmelCase : List[Any] = logit_scale_init_value _UpperCAmelCase : Tuple = 1.0 _UpperCAmelCase : Any = 0.02 _UpperCAmelCase : List[Any] = image_text_hidden_size @classmethod def __lowerCAmelCase ( cls , A , A , **A ) -> List[Any]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A ) def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : int = copy.deepcopy(self.__dict__ ) _UpperCAmelCase : Tuple = self.text_config.to_dict() _UpperCAmelCase : Optional[int] = self.vision_config.to_dict() _UpperCAmelCase : Any = self.__class__.model_type return output
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _lowerCAmelCase :Tuple = tuple[int, int] class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A ) -> None: _UpperCAmelCase : set[int] = vertices _UpperCAmelCase : dict[EdgeT, int] = { (min(A ), max(A )): weight for edge, weight in edges.items() } def __lowerCAmelCase ( self , A , A ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _UpperCAmelCase : List[Any] = weight def __lowerCAmelCase ( self ) -> Graph: _UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} ) _UpperCAmelCase : EdgeT _UpperCAmelCase : int _UpperCAmelCase : EdgeT _UpperCAmelCase : int while len(subgraph.vertices ) < len(self.vertices ): _UpperCAmelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _UpperCAmelCase : Tuple = edge _UpperCAmelCase : Optional[int] = weight subgraph.add_edge(A , A ) return subgraph def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ): _UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : dict[EdgeT, int] = {} _UpperCAmelCase : list[str] _UpperCAmelCase : int _UpperCAmelCase : int with open(UpperCamelCase__ ) as f: _UpperCAmelCase : str = f.read().strip().split('''\n''' ) _UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data] for edgea in range(1 , len(UpperCamelCase__ ) ): for edgea in range(UpperCamelCase__ ): if adjaceny_matrix[edgea][edgea] != "-": _UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] ) _UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ ) _UpperCAmelCase : Graph = graph.prims_algorithm() _UpperCAmelCase : int = sum(graph.edges.values() ) _UpperCAmelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" from collections import defaultdict from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst def lowerCamelCase_ (): _UpperCAmelCase , _UpperCAmelCase : str = 9, 14 # noqa: F841 _UpperCAmelCase : Optional[Any] = [ [0, 1, 4], [0, 7, 8], [1, 2, 8], [7, 8, 7], [7, 6, 1], [2, 8, 2], [8, 6, 6], [2, 3, 7], [2, 5, 4], [6, 5, 2], [3, 5, 14], [3, 4, 9], [5, 4, 10], [1, 7, 11], ] _UpperCAmelCase : Union[str, Any] = defaultdict(UpperCamelCase__ ) for nodea, nodea, cost in edges: adjancency[nodea].append([nodea, cost] ) adjancency[nodea].append([nodea, cost] ) _UpperCAmelCase : Tuple = mst(UpperCamelCase__ ) _UpperCAmelCase : Dict = [ [7, 6, 1], [2, 8, 2], [6, 5, 2], [0, 1, 4], [2, 5, 4], [2, 3, 7], [0, 7, 8], [3, 4, 9], ] for answer in expected: _UpperCAmelCase : int = tuple(answer[:2] ) _UpperCAmelCase : Any = tuple(edge[::-1] ) assert edge in result or reverse in result
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :int = logging.get_logger(__name__) _lowerCAmelCase :Union[str, Any] = { 'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''mgp-str''' def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]: super().__init__(**A ) _UpperCAmelCase : Any = image_size _UpperCAmelCase : str = patch_size _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Dict = max_token_length _UpperCAmelCase : Optional[Any] = num_character_labels _UpperCAmelCase : int = num_bpe_labels _UpperCAmelCase : List[str] = num_wordpiece_labels _UpperCAmelCase : Optional[int] = hidden_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : List[Any] = mlp_ratio _UpperCAmelCase : List[str] = distilled _UpperCAmelCase : Optional[int] = layer_norm_eps _UpperCAmelCase : str = drop_rate _UpperCAmelCase : List[Any] = qkv_bias _UpperCAmelCase : List[str] = attn_drop_rate _UpperCAmelCase : Dict = drop_path_rate _UpperCAmelCase : Union[str, Any] = output_aa_attentions _UpperCAmelCase : List[str] = initializer_range
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :List[str] = logging.get_logger(__name__) _lowerCAmelCase :Any = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''falcon''' a__ =['''past_key_values'''] def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any: _UpperCAmelCase : int = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A ) _UpperCAmelCase : int = hidden_size if n_embed is None else n_embed _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Optional[int] = layer_norm_epsilon _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : Optional[int] = use_cache _UpperCAmelCase : Any = hidden_dropout _UpperCAmelCase : Dict = attention_dropout _UpperCAmelCase : Any = bos_token_id _UpperCAmelCase : List[Any] = eos_token_id _UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase : Dict = alibi _UpperCAmelCase : Optional[int] = new_decoder_architecture _UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase : Optional[int] = parallel_attn _UpperCAmelCase : Optional[int] = bias super().__init__(bos_token_id=A , eos_token_id=A , **A ) @property def __lowerCAmelCase ( self ) -> List[str]: return self.hidden_size // self.num_attention_heads @property def __lowerCAmelCase ( self ) -> List[Any]: return not self.alibi
"""simple docstring""" from __future__ import annotations import math def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ): if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(UpperCamelCase__ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) def lowerCamelCase_ (): _UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423] _UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def lowerCamelCase_ (): _UpperCAmelCase : Optional[Any] = ArgumentParser('''Transformers CLI tool''' , usage='''transformers-cli <command> [<args>]''' ) _UpperCAmelCase : Optional[Any] = parser.add_subparsers(help='''transformers-cli command helpers''' ) # Register commands ConvertCommand.register_subcommand(UpperCamelCase__ ) DownloadCommand.register_subcommand(UpperCamelCase__ ) EnvironmentCommand.register_subcommand(UpperCamelCase__ ) RunCommand.register_subcommand(UpperCamelCase__ ) ServeCommand.register_subcommand(UpperCamelCase__ ) UserCommands.register_subcommand(UpperCamelCase__ ) AddNewModelCommand.register_subcommand(UpperCamelCase__ ) AddNewModelLikeCommand.register_subcommand(UpperCamelCase__ ) LfsCommands.register_subcommand(UpperCamelCase__ ) PTtoTFCommand.register_subcommand(UpperCamelCase__ ) # Let's go _UpperCAmelCase : int = parser.parse_args() if not hasattr(UpperCamelCase__ , '''func''' ): parser.print_help() exit(1 ) # Run _UpperCAmelCase : Dict = args.func(UpperCamelCase__ ) service.run() if __name__ == "__main__": main()
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _lowerCAmelCase :Optional[Any] = False class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A ) _UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = generator.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = '''cyberpunk 2077''' _UpperCAmelCase : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.dual_guided( prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images _UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger ''' _UpperCAmelCase : Tuple = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.text_to_image( prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images _UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images _UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 
2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =IFImgaImgSuperResolutionPipeline a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) a__ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ) -> List[str]: return self._get_superresolution_dummy_components() def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Any = torch.manual_seed(A ) else: _UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : List[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCAmelCase :Any = False @skip_mps class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ): '''simple docstring''' a__ =StableDiffusionAttendAndExcitePipeline a__ =False a__ =TEXT_TO_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) a__ =TEXT_TO_IMAGE_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __lowerCAmelCase ( cls ) -> List[str]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , ) _UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , ) torch.manual_seed(0 ) _UpperCAmelCase : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _UpperCAmelCase : List[str] = CLIPTextModel(A ) _UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCAmelCase : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: _UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : List[str] = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = '''cpu''' _UpperCAmelCase : Tuple = self.get_dummy_components() _UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Dict = self.get_dummy_inputs(A 
) _UpperCAmelCase : Union[str, Any] = pipe(**A ).images _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) _UpperCAmelCase : int = np.array( [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) _UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1E-3 ) def __lowerCAmelCase ( self ) -> Dict: super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> str: super().test_save_load_local(expected_max_difference=5E-4 ) def __lowerCAmelCase ( self ) -> Optional[int]: super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = torch.manual_seed(5_1 ) _UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses''' _UpperCAmelCase : int = [5, 7] _UpperCAmelCase : Dict = pipe( prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] _UpperCAmelCase : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
"""simple docstring""" import copy from typing import Dict, List, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING _lowerCAmelCase :Dict = { 'facebook/mask2former-swin-small-coco-instance': ( 'https://huggingface.co/facebook/mask2former-swin-small-coco-instance/blob/main/config.json' ) # See all Mask2Former models at https://huggingface.co/models?filter=mask2former } _lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__) class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''mask2former''' a__ =['''swin'''] a__ ={'''hidden_size''': '''hidden_dim'''} def __init__( self , A = None , A = 2_5_6 , A = 2_5_6 , A = 2_5_6 , A = 1_0_2_4 , A = "relu" , A = 6 , A = 1_0 , A = 8 , A = 0.0 , A = 2_0_4_8 , A = False , A = False , A = 4 , A = 2_5_5 , A = 1_0_0 , A = 0.1 , A = 2.0 , A = 5.0 , A = 5.0 , A = 1_2_5_4_4 , A = 3.0 , A = 0.75 , A = 0.02 , A = 1.0 , A = True , A = [4, 8, 1_6, 3_2] , A = None , **A , ) -> str: if backbone_config is None: logger.info('''`backbone_config` is `None`. Initializing the config with the default `Swin` backbone.''' ) _UpperCAmelCase : List[Any] = CONFIG_MAPPING['''swin''']( image_size=2_2_4 , in_channels=3 , patch_size=4 , embed_dim=9_6 , depths=[2, 2, 1_8, 2] , num_heads=[3, 6, 1_2, 2_4] , window_size=7 , drop_path_rate=0.3 , use_absolute_embeddings=A , out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] , ) if isinstance(A , A ): _UpperCAmelCase : int = backbone_config.pop('''model_type''' ) _UpperCAmelCase : Optional[Any] = CONFIG_MAPPING[backbone_model_type] _UpperCAmelCase : int = config_class.from_dict(A ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'Backbone {backbone_config.model_type} is not a supported model and may not be compatible with Mask2Former. 
' f'Supported model types: {",".join(self.backbones_supported )}' ) _UpperCAmelCase : Dict = backbone_config _UpperCAmelCase : str = feature_size _UpperCAmelCase : List[Any] = mask_feature_size _UpperCAmelCase : List[str] = hidden_dim _UpperCAmelCase : Optional[Any] = encoder_feedforward_dim _UpperCAmelCase : Optional[Any] = activation_function _UpperCAmelCase : Any = encoder_layers _UpperCAmelCase : str = decoder_layers _UpperCAmelCase : Any = num_attention_heads _UpperCAmelCase : Union[str, Any] = dropout _UpperCAmelCase : List[Any] = dim_feedforward _UpperCAmelCase : Tuple = pre_norm _UpperCAmelCase : Tuple = enforce_input_projection _UpperCAmelCase : Tuple = common_stride _UpperCAmelCase : Union[str, Any] = ignore_value _UpperCAmelCase : List[str] = num_queries _UpperCAmelCase : int = no_object_weight _UpperCAmelCase : Any = class_weight _UpperCAmelCase : Optional[int] = mask_weight _UpperCAmelCase : Union[str, Any] = dice_weight _UpperCAmelCase : Optional[int] = train_num_points _UpperCAmelCase : int = oversample_ratio _UpperCAmelCase : List[Any] = importance_sample_ratio _UpperCAmelCase : Tuple = init_std _UpperCAmelCase : int = init_xavier_std _UpperCAmelCase : Tuple = use_auxiliary_loss _UpperCAmelCase : Any = feature_strides _UpperCAmelCase : Tuple = output_auxiliary_logits _UpperCAmelCase : Tuple = decoder_layers super().__init__(**A ) @classmethod def __lowerCAmelCase ( cls , A , **A ) -> List[str]: return cls( backbone_config=A , **A , ) def __lowerCAmelCase ( self ) -> Dict[str, any]: _UpperCAmelCase : List[str] = copy.deepcopy(self.__dict__ ) _UpperCAmelCase : Any = self.backbone_config.to_dict() _UpperCAmelCase : Dict = self.__class__.model_type return output
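# Hedged sketch for the Mask2Former config above: building it from a Swin backbone config
# and round-tripping through to_dict(). Mask2FormerConfig and from_backbone_config are
# the assumed public names for the masked class and classmethod.
from transformers import Mask2FormerConfig, SwinConfig

backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
config = Mask2FormerConfig.from_backbone_config(backbone)
assert config.to_dict()["backbone_config"]["model_type"] == "swin"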
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Dict = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : List[str] = -1 _UpperCAmelCase : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[str] = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : List[Any] = tokenizer.decode(greedy_ids[0] ) with CaptureStdout() as cs: _UpperCAmelCase : str = TextStreamer(A ) model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCAmelCase : List[str] = cs.out[:-1] self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : List[Any] = -1 _UpperCAmelCase : Union[str, Any] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[Any] = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : str = tokenizer.decode(greedy_ids[0] ) _UpperCAmelCase : Union[str, Any] = TextIteratorStreamer(A ) _UpperCAmelCase : Any = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _UpperCAmelCase : Any = Thread(target=model.generate , kwargs=A ) thread.start() _UpperCAmelCase : Any = '''''' for new_text in streamer: streamer_text += new_text self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : str = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : Any = -1 _UpperCAmelCase : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : Dict = model.generate(A , max_new_tokens=1_0 , do_sample=A ) _UpperCAmelCase : Dict = greedy_ids[:, input_ids.shape[1] :] _UpperCAmelCase : List[str] = tokenizer.decode(new_greedy_ids[0] ) with CaptureStdout() as cs: _UpperCAmelCase : Any = TextStreamer(A , skip_prompt=A ) model.generate(A , max_new_tokens=1_0 , do_sample=A , streamer=A ) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _UpperCAmelCase : Union[str, Any] = cs.out[:-1] self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Optional[int]: # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. 
Must be tested # with actual models -- the dummy models' tokenizers are not aligned with their models, and # `skip_special_tokens=True` has no effect on them _UpperCAmelCase : int = AutoTokenizer.from_pretrained('''distilgpt2''' ) _UpperCAmelCase : Union[str, Any] = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(A ) _UpperCAmelCase : Tuple = -1 _UpperCAmelCase : int = torch.ones((1, 5) , device=A ).long() * model.config.bos_token_id with CaptureStdout() as cs: _UpperCAmelCase : Optional[Any] = TextStreamer(A , skip_special_tokens=A ) model.generate(A , max_new_tokens=1 , do_sample=A , streamer=A ) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _UpperCAmelCase : Tuple = cs.out[:-1] # Remove the final "\n" _UpperCAmelCase : int = tokenizer(A , return_tensors='''pt''' ) self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) ) def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Any = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ) _UpperCAmelCase : Any = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(A ) _UpperCAmelCase : Dict = -1 _UpperCAmelCase : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(A ) _UpperCAmelCase : List[Any] = TextIteratorStreamer(A , timeout=0.001 ) _UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''max_new_tokens''': 1_0, '''do_sample''': False, '''streamer''': streamer} _UpperCAmelCase : Optional[Any] = Thread(target=model.generate , kwargs=A ) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(A ): _UpperCAmelCase : Optional[Any] = '''''' for new_text in streamer: streamer_text += new_text
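# Hedged usage sketch of the streaming API exercised by the tests above: TextStreamer
# prints tokens to stdout as generate() produces them; the checkpoint name is illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")
streamer = TextStreamer(tok, skip_prompt=True)
model.generate(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)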
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class _UpperCAmelCase ( a ,unittest.TestCase ): '''simple docstring''' a__ =MgpstrTokenizer a__ =False a__ ={} a__ =False def __lowerCAmelCase ( self ) -> List[Any]: super().setUp() # fmt: off _UpperCAmelCase : List[str] = ['''[GO]''', '''[s]''', '''0''', '''1''', '''2''', '''3''', '''4''', '''5''', '''6''', '''7''', '''8''', '''9''', '''a''', '''b''', '''c''', '''d''', '''e''', '''f''', '''g''', '''h''', '''i''', '''j''', '''k''', '''l''', '''m''', '''n''', '''o''', '''p''', '''q''', '''r''', '''s''', '''t''', '''u''', '''v''', '''w''', '''x''', '''y''', '''z'''] # fmt: on _UpperCAmelCase : Optional[Any] = dict(zip(A , range(len(A ) ) ) ) _UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(A ) + '''\n''' ) def __lowerCAmelCase ( self , **A ) -> List[str]: return MgpstrTokenizer.from_pretrained(self.tmpdirname , **A ) def __lowerCAmelCase ( self , A ) -> Dict: _UpperCAmelCase : int = '''tester''' _UpperCAmelCase : Tuple = '''tester''' return input_text, output_text @unittest.skip('''MGP-STR always lower cases letters.''' ) def __lowerCAmelCase ( self ) -> str: pass def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=A ) for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): _UpperCAmelCase : Optional[int] = '''[SPECIAL_TOKEN]''' tokenizer.add_special_tokens({'''cls_token''': special_token} ) _UpperCAmelCase : List[str] = tokenizer.encode([special_token] , add_special_tokens=A ) self.assertEqual(len(A ) , 1 ) _UpperCAmelCase : Union[str, Any] = tokenizer.decode(A , skip_special_tokens=A ) self.assertTrue(special_token not in decoded ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'{tokenizer.__class__.__name__}' ): _UpperCAmelCase , _UpperCAmelCase : List[Any] = self.get_input_output_texts(A ) _UpperCAmelCase : str = tokenizer.tokenize(A ) _UpperCAmelCase : Any = tokenizer.convert_tokens_to_ids(A ) _UpperCAmelCase : Dict = tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) _UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(A ) self.assertNotEqual(len(A ) , 0 ) _UpperCAmelCase : List[Any] = tokenizer.decode(A ) self.assertIsInstance(A , A ) self.assertEqual(text_a.replace(''' ''' , '''''' ) , A ) @unittest.skip('''MGP-STR tokenizer only handles one sequence.''' ) def __lowerCAmelCase ( self ) -> Dict: pass @unittest.skip('''inputs cannot be pretokenized in MgpstrTokenizer''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCamelCase_ (UpperCamelCase__ : float ): if num <= 0: raise ValueError('''math domain error''' ) return quad(UpperCamelCase__ , 0 , UpperCamelCase__ , args=(UpperCamelCase__) )[0] def lowerCamelCase_ (UpperCamelCase__ : float , UpperCamelCase__ : float ): return math.pow(UpperCamelCase__ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
"""simple docstring""" import argparse import json import os import re import shutil import torch from transformers import BioGptConfig, BioGptForCausalLM from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE from transformers.utils import WEIGHTS_NAME, logging logging.set_verbosity_warning() _lowerCAmelCase :List[Any] = 2 class _UpperCAmelCase : '''simple docstring''' def __init__( self , *, # begin keyword-only arguments A="<s>" , A="<pad>" , A="</s>" , A="<unk>" , A=None , ) -> List[Any]: _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = bos, unk, pad, eos _UpperCAmelCase : Union[str, Any] = [] _UpperCAmelCase : Tuple = [] _UpperCAmelCase : int = {} _UpperCAmelCase : str = self.add_symbol(A ) _UpperCAmelCase : List[str] = self.add_symbol(A ) _UpperCAmelCase : List[str] = self.add_symbol(A ) _UpperCAmelCase : str = self.add_symbol(A ) if extra_special_symbols: for s in extra_special_symbols: self.add_symbol(A ) _UpperCAmelCase : Dict = len(self.symbols ) def __eq__( self , A ) -> Any: return self.indices == other.indices def __getitem__( self , A ) -> Union[str, Any]: if idx < len(self.symbols ): return self.symbols[idx] return self.unk_word def __len__( self ) -> Optional[int]: return len(self.symbols ) def __contains__( self , A ) -> List[Any]: return sym in self.indices @classmethod def __lowerCAmelCase ( cls , A ) -> Tuple: _UpperCAmelCase : Any = cls() d.add_from_file(A ) return d def __lowerCAmelCase ( self , A , A=1 , A=False ) -> Dict: if word in self.indices and not overwrite: _UpperCAmelCase : Tuple = self.indices[word] _UpperCAmelCase : Tuple = self.count[idx] + n return idx else: _UpperCAmelCase : Union[str, Any] = len(self.symbols ) _UpperCAmelCase : str = idx self.symbols.append(A ) self.count.append(A ) return idx def __lowerCAmelCase ( self , A ) -> Optional[int]: return 0 def __lowerCAmelCase ( self , A ) -> Union[str, Any]: if isinstance(A , A ): try: with open(A , '''r''' , encoding='''utf-8''' ) as fd: self.add_from_file(A ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception('''Incorrect encoding detected in {}, please rebuild the dataset'''.format(A ) ) return _UpperCAmelCase : Optional[Any] = f.readlines() _UpperCAmelCase : Optional[Any] = self._load_meta(A ) for line in lines[indices_start_line:]: try: _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = line.rstrip().rsplit(''' ''' , 1 ) if field == "#fairseq:overwrite": _UpperCAmelCase : List[str] = True _UpperCAmelCase , _UpperCAmelCase : Any = line.rsplit(''' ''' , 1 ) else: _UpperCAmelCase : Optional[Any] = False _UpperCAmelCase : Optional[Any] = int(A ) _UpperCAmelCase : List[str] = line if word in self and not overwrite: raise RuntimeError( '''Duplicate word found when loading Dictionary: \'{}\'. ''' '''Duplicate words can overwrite earlier ones by adding the ''' '''#fairseq:overwrite flag at the end of the corresponding row ''' '''in the dictionary file. 
If using the Camembert model, please ''' '''download an updated copy of the model file.'''.format(A ) ) self.add_symbol(A , n=A , overwrite=A ) except ValueError: raise ValueError('''Incorrect dictionary format, expected \'<token> <cnt> [flags]\'''' ) def lowerCamelCase_ (UpperCamelCase__ : Tuple ): # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up, # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7} _UpperCAmelCase : Any = dict((re.sub(r'''@@$''' , '''''' , UpperCamelCase__ ), v) if k.endswith('''@@''' ) else (re.sub(r'''$''' , '''</w>''' , UpperCamelCase__ ), v) for k, v in d.items() ) _UpperCAmelCase : Optional[Any] = '''<s> <pad> </s> <unk>'''.split() # restore the special tokens for k in keep_keys: del da[F'{k}</w>'] _UpperCAmelCase : Union[str, Any] = d[k] # restore return da def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ): # prep if not os.path.exists(UpperCamelCase__ ): raise ValueError(F'path {biogpt_checkpoint_path} does not exist!' ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) print(F'Writing results to {pytorch_dump_folder_path}' ) # handle various types of models _UpperCAmelCase : List[Any] = os.path.join(UpperCamelCase__ , '''checkpoint.pt''' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(F'path to the file {checkpoint_file} does not exist!' ) _UpperCAmelCase : Optional[int] = torch.load(UpperCamelCase__ , map_location='''cpu''' ) _UpperCAmelCase : Optional[int] = chkpt['''cfg''']['''model'''] # dicts _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , '''dict.txt''' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(F'path to the file {dict_file} does not exist!' ) _UpperCAmelCase : Tuple = Dictionary.load(UpperCamelCase__ ) _UpperCAmelCase : Dict = rewrite_dict_keys(src_dict.indices ) _UpperCAmelCase : Optional[Any] = len(UpperCamelCase__ ) _UpperCAmelCase : Tuple = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['''vocab_file'''] ) print(F'Generating {src_vocab_file} of {src_vocab_size} records' ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) ) # merges_file (bpecodes) _UpperCAmelCase : int = os.path.join(UpperCamelCase__ , '''bpecodes''' ) if not os.path.isfile(UpperCamelCase__ ): raise ValueError(F'path to the file {bpecodes_file} does not exist!' 
) _UpperCAmelCase : int = os.path.join(UpperCamelCase__ , VOCAB_FILES_NAMES['''merges_file'''] ) shutil.copyfile(UpperCamelCase__ , UpperCamelCase__ ) # model config _UpperCAmelCase : Dict = os.path.join(UpperCamelCase__ , '''config.json''' ) _UpperCAmelCase : Tuple = { '''activation_dropout''': args['''activation_dropout'''], '''architectures''': ['''BioGptForCausalLM'''], '''attention_probs_dropout_prob''': args['''attention_dropout'''], '''bos_token_id''': 0, '''eos_token_id''': 2, '''hidden_act''': args['''activation_fn'''], '''hidden_dropout_prob''': args['''dropout'''], '''hidden_size''': args['''decoder_embed_dim'''], '''initializer_range''': 0.02, '''intermediate_size''': args['''decoder_ffn_embed_dim'''], '''layer_norm_eps''': 1E-1_2, '''layerdrop''': args['''decoder_layerdrop'''], '''max_position_embeddings''': args['''max_target_positions'''], '''model_type''': '''biogpt''', '''num_attention_heads''': args['''decoder_attention_heads'''], '''num_hidden_layers''': args['''decoder_layers'''], '''pad_token_id''': 1, '''scale_embedding''': not args['''no_scale_embedding'''], '''tie_word_embeddings''': args['''share_decoder_input_output_embed'''], '''vocab_size''': src_vocab_size, } # good hparam defaults to start with print(F'Generating {biogpt_model_config_file}' ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) ) # tokenizer config _UpperCAmelCase : List[Any] = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : List[Any] = { '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', '''model_max_length''': 1024, '''pad_token''': '''<pad>''', '''special_tokens_map_file''': None, '''tokenizer_class''': '''BioGptTokenizer''', '''unk_token''': '''<unk>''', } print(F'Generating {biogpt_tokenizer_config_file}' ) with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(UpperCamelCase__ , ensure_ascii=UpperCamelCase__ , indent=UpperCamelCase__ ) ) # model _UpperCAmelCase : Union[str, Any] = chkpt['''model'''] # remove unneeded keys _UpperCAmelCase : Optional[Any] = [ '''decoder.version''', ] for k in ignore_keys: model_state_dict.pop(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : Dict = list(model_state_dict.keys() ) for layer_name in layer_names: if layer_name.endswith('''output_projection.weight''' ): _UpperCAmelCase : Dict = model_state_dict.pop(UpperCamelCase__ ) else: _UpperCAmelCase : Optional[Any] = model_state_dict.pop(UpperCamelCase__ ) _UpperCAmelCase : int = BioGptConfig.from_pretrained(UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = BioGptForCausalLM(UpperCamelCase__ ) # check that it loads ok model_new.load_state_dict(UpperCamelCase__ ) # save _UpperCAmelCase : Tuple = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) print(F'Generating {pytorch_weights_dump_path}' ) torch.save(UpperCamelCase__ , UpperCamelCase__ ) print('''Conversion is done!''' ) if __name__ == "__main__": _lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--biogpt_checkpoint_path', default=None, type=str, required=True, help=( 'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,' ' bpecodes, etc.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' 
) _lowerCAmelCase :int = parser.parse_args() convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b" _UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:] _UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import ( AudioDiffusionPipeline, AutoencoderKL, DDIMScheduler, DDPMScheduler, DiffusionPipeline, Mel, UNetaDConditionModel, UNetaDModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu enable_full_determinism() class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCAmelCase ( self ) -> Optional[Any]: torch.manual_seed(0 ) _UpperCAmelCase : Tuple = UNetaDModel( sample_size=(3_2, 6_4) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , ) return model @property def __lowerCAmelCase ( self ) -> Dict: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = UNetaDConditionModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=1_0 , ) return model @property def __lowerCAmelCase ( self ) -> Union[str, Any]: torch.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = AutoencoderKL( sample_size=(1_2_8, 6_4) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , ) _UpperCAmelCase : Tuple = UNetaDModel( sample_size=(6_4, 3_2) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(1_2_8, 1_2_8) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , ) return vqvae, unet @slow def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : Optional[int] = '''cpu''' # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase : Any = Mel( x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , ) _UpperCAmelCase : Tuple = DDPMScheduler() _UpperCAmelCase : str = AudioDiffusionPipeline(vqvae=A , unet=self.dummy_unet , mel=A , scheduler=A ) _UpperCAmelCase : Optional[int] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Tuple = torch.Generator(device=A ).manual_seed(4_2 ) _UpperCAmelCase : Optional[int] = pipe(generator=A , steps=4 ) _UpperCAmelCase : Dict = output.audios[0] _UpperCAmelCase : Tuple = output.images[0] _UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(4_2 ) _UpperCAmelCase : List[str] = pipe(generator=A , steps=4 , return_dict=A ) _UpperCAmelCase : Union[str, Any] = output[0][0] assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length) assert ( image.height == self.dummy_unet.config.sample_size[0] and image.width == self.dummy_unet.config.sample_size[1] ) _UpperCAmelCase : Optional[Any] = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0] _UpperCAmelCase : Optional[Any] = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:1_0] _UpperCAmelCase : Optional[int] = np.array([6_9, 2_5_5, 2_5_5, 2_5_5, 0, 0, 7_7, 1_8_1, 1_2, 1_2_7] ) assert np.abs(image_slice.flatten() - 
expected_slice ).max() == 0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0 _UpperCAmelCase : str = Mel( x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , ) _UpperCAmelCase : Any = DDIMScheduler() _UpperCAmelCase : Optional[int] = self.dummy_vqvae_and_unet _UpperCAmelCase : Optional[int] = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=A , scheduler=A ) _UpperCAmelCase : str = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) np.random.seed(0 ) _UpperCAmelCase : Optional[int] = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) ) _UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(4_2 ) _UpperCAmelCase : List[str] = pipe(raw_audio=A , generator=A , start_step=5 , steps=1_0 ) _UpperCAmelCase : Dict = output.images[0] assert ( image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0] and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1] ) _UpperCAmelCase : Dict = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0] _UpperCAmelCase : List[Any] = np.array([1_2_0, 1_1_7, 1_1_0, 1_0_9, 1_3_8, 1_6_7, 1_3_8, 1_4_8, 1_3_2, 1_2_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 _UpperCAmelCase : Dict = self.dummy_unet_condition _UpperCAmelCase : str = AudioDiffusionPipeline( vqvae=self.dummy_vqvae_and_unet[0] , unet=A , mel=A , scheduler=A ) _UpperCAmelCase : List[Any] = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) np.random.seed(0 ) _UpperCAmelCase : Dict = torch.rand((1, 1, 1_0) ) _UpperCAmelCase : Optional[Any] = pipe(generator=A , encoding=A ) _UpperCAmelCase : Optional[int] = output.images[0] _UpperCAmelCase : int = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0] _UpperCAmelCase : Tuple = np.array([1_0_7, 1_0_3, 1_2_0, 1_2_7, 1_4_2, 1_2_2, 1_1_3, 1_2_2, 9_7, 1_1_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0 @slow @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[str]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Union[str, Any] = torch_device _UpperCAmelCase : int = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' ) _UpperCAmelCase : Any = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Tuple = torch.Generator(device=A ).manual_seed(4_2 ) _UpperCAmelCase : Optional[Any] = pipe(generator=A ) _UpperCAmelCase : Dict = output.audios[0] _UpperCAmelCase : Optional[int] = output.images[0] assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length) assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1] _UpperCAmelCase : str = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:1_0] _UpperCAmelCase : Dict = np.array([1_5_1, 1_6_7, 1_5_4, 1_4_4, 1_2_2, 1_3_4, 1_2_1, 1_0_5, 7_0, 2_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _lowerCAmelCase :int = {'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase :Any = [ 'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST', 'ViTMSNModel', 'ViTMSNForImageClassification', 'ViTMSNPreTrainedModel', ] if TYPE_CHECKING: from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_msn import ( VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMSNForImageClassification, ViTMSNModel, ViTMSNPreTrainedModel, ) else: import sys _lowerCAmelCase :int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring""" import hashlib import unittest from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available from transformers.pipelines import DepthEstimationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_torch_available(): import torch if is_vision_available(): from PIL import Image else: class _UpperCAmelCase : '''simple docstring''' @staticmethod def __lowerCAmelCase ( *A , **A ) -> str: pass def lowerCamelCase_ (UpperCamelCase__ : Image ): _UpperCAmelCase : int = hashlib.mda(image.tobytes() ) return m.hexdigest() @is_pipeline_test @require_vision @require_timm @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' a__ =MODEL_FOR_DEPTH_ESTIMATION_MAPPING def __lowerCAmelCase ( self , A , A , A ) -> Optional[int]: _UpperCAmelCase : int = DepthEstimationPipeline(model=A , image_processor=A ) return depth_estimator, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def __lowerCAmelCase ( self , A , A ) -> List[Any]: _UpperCAmelCase : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , A ) import datasets _UpperCAmelCase : Dict = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' ) _UpperCAmelCase : int = depth_estimator( [ Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ), '''http://images.cocodataset.org/val2017/000000039769.jpg''', # RGBA dataset[0]['''file'''], # LA dataset[1]['''file'''], # L dataset[2]['''file'''], ] ) self.assertEqual( [ {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, {'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )}, ] , A , ) @require_tf @unittest.skip('''Depth estimation is not implemented in TF''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @slow @require_torch def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : Optional[Any] = '''Intel/dpt-large''' _UpperCAmelCase : Optional[int] = pipeline('''depth-estimation''' , model=A ) _UpperCAmelCase : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' ) _UpperCAmelCase : Tuple = hashimage(outputs['''depth'''] ) # This seems flaky. # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977") self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 ) self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 ) @require_torch def __lowerCAmelCase ( self ) -> Tuple: # This is highly irregular to have no small tests. self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) _lowerCAmelCase :List[str] = '▁' _lowerCAmelCase :Tuple = {'vocab_file': 'sentencepiece.bpe.model'} _lowerCAmelCase :List[Any] = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } _lowerCAmelCase :Tuple = { 'xlm-roberta-base': 512, 'xlm-roberta-large': 512, 'xlm-roberta-large-finetuned-conll02-dutch': 512, 'xlm-roberta-large-finetuned-conll02-spanish': 512, 'xlm-roberta-large-finetuned-conll03-english': 512, 'xlm-roberta-large-finetuned-conll03-german': 512, } class _UpperCAmelCase ( a ): '''simple docstring''' a__ =VOCAB_FILES_NAMES a__ =PRETRAINED_VOCAB_FILES_MAP a__ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ =['''input_ids''', '''attention_mask'''] def __init__( self , A , A="<s>" , A="</s>" , A="</s>" , A="<s>" , A="<unk>" , A="<pad>" , A="<mask>" , A = None , **A , ) -> None: # Mask token behave like a normal word, i.e. include the space before it _UpperCAmelCase : Tuple = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token _UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=A , eos_token=A , unk_token=A , sep_token=A , cls_token=A , pad_token=A , mask_token=A , sp_model_kwargs=self.sp_model_kwargs , **A , ) _UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(A ) ) _UpperCAmelCase : List[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token _UpperCAmelCase : List[str] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab _UpperCAmelCase : Any = 1 _UpperCAmelCase : Optional[Any] = len(self.sp_model ) + self.fairseq_offset _UpperCAmelCase : int = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Union[str, Any]: _UpperCAmelCase : Tuple = self.__dict__.copy() _UpperCAmelCase : List[str] = None _UpperCAmelCase : str = self.sp_model.serialized_model_proto() return state def __setstate__( self , A ) -> Optional[int]: _UpperCAmelCase : Optional[int] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): _UpperCAmelCase : Optional[Any] = {} _UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def __lowerCAmelCase ( self , A , A = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] _UpperCAmelCase : Any = [self.cls_token_id] _UpperCAmelCase : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __lowerCAmelCase ( self , A , A = None , A = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=A , token_ids_a=A , already_has_special_tokens=A ) if token_ids_a is None: return [1] + ([0] * len(A )) + [1] return [1] + ([0] * len(A )) + [1, 1] + ([0] * len(A )) + [1] def __lowerCAmelCase ( self , A , A = None ) -> List[int]: _UpperCAmelCase : Dict = [self.sep_token_id] _UpperCAmelCase : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __lowerCAmelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Dict = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __lowerCAmelCase ( self , A ) -> List[str]: return self.sp_model.encode(A , out_type=A ) def __lowerCAmelCase ( self , A ) -> Any: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] _UpperCAmelCase : Any = self.sp_model.PieceToId(A ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def __lowerCAmelCase ( self , A ) -> int: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def __lowerCAmelCase ( self , A ) -> int: _UpperCAmelCase : str = ''''''.join(A ).replace(A , ''' ''' ).strip() return out_string def __lowerCAmelCase ( self , A , A = None ) -> Tuple[str]: if not os.path.isdir(A ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return _UpperCAmelCase : List[Any] = os.path.join( A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(A ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , A ) elif not os.path.isfile(self.vocab_file ): with open(A , '''wb''' ) as fi: _UpperCAmelCase : str = self.sp_model.serialized_model_proto() fi.write(A ) return 
(out_vocab_file,)
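# Standalone illustration of the fairseq/sentencepiece id alignment documented
# above. The toy spm table is hypothetical; only the offset logic mirrors the
# tokenizer: spm reserves id 0 for "<unk>", while fairseq pins "<s>"/"<pad>"/
# "</s>"/"<unk>" to ids 0-3, so every real sentencepiece id is shifted by 1.
fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1


def convert_token_to_id(token, spm_piece_to_id):
    # spm_piece_to_id stands in for sp_model.PieceToId; it returns a falsy 0
    # for unknown pieces, which is why that case falls back to the <unk> id.
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id(token)
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


# "," has spm id 3 in this toy table, so its fairseq id becomes 3 + 1 = 4:
assert convert_token_to_id(",", {",": 3}.get) == 4
assert convert_token_to_id("<pad>", {",": 3}.get) == 1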
"""simple docstring""" import importlib.util import json import os import warnings from dataclasses import dataclass, field import torch from ..training_args import TrainingArguments from ..utils import cached_property, is_sagemaker_dp_enabled, logging _lowerCAmelCase :List[str] = logging.get_logger(__name__) def lowerCamelCase_ (): # Get the sagemaker specific mp parameters from smp_options variable. _UpperCAmelCase : Optional[Any] = os.getenv('''SM_HP_MP_PARAMETERS''' , '''{}''' ) try: # Parse it and check the field "partitions" is included, it is required for model parallel. _UpperCAmelCase : Optional[Any] = json.loads(UpperCamelCase__ ) if "partitions" not in smp_options: return False except json.JSONDecodeError: return False # Get the sagemaker specific framework parameters from mpi_options variable. _UpperCAmelCase : Optional[int] = os.getenv('''SM_FRAMEWORK_PARAMS''' , '''{}''' ) try: # Parse it and check the field "sagemaker_distributed_dataparallel_enabled". _UpperCAmelCase : int = json.loads(UpperCamelCase__ ) if not mpi_options.get('''sagemaker_mpi_enabled''' , UpperCamelCase__ ): return False except json.JSONDecodeError: return False # Lastly, check if the `smdistributed` module is present. return importlib.util.find_spec('''smdistributed''' ) is not None if is_sagemaker_model_parallel_available(): import smdistributed.modelparallel.torch as smp smp.init() @dataclass class _UpperCAmelCase ( a ): '''simple docstring''' a__ =field( default='''''' ,metadata={'''help''': '''Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer'''} ,) def __lowerCAmelCase ( self ) -> Optional[Any]: super().__post_init__() warnings.warn( '''`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use ''' '''`TrainingArguments` instead.''' , A , ) @cached_property def __lowerCAmelCase ( self ) -> "torch.device": logger.info('''PyTorch: setting up devices''' ) if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1: logger.warning( '''torch.distributed process group is initialized, but local_rank == -1. ''' '''In order to use Torch DDP, launch your script with `python -m torch.distributed.launch''' ) if self.no_cuda: _UpperCAmelCase : Dict = torch.device('''cpu''' ) _UpperCAmelCase : Union[str, Any] = 0 elif is_sagemaker_model_parallel_available(): _UpperCAmelCase : Optional[Any] = smp.local_rank() _UpperCAmelCase : str = torch.device('''cuda''' , A ) _UpperCAmelCase : Optional[int] = 1 elif is_sagemaker_dp_enabled(): import smdistributed.dataparallel.torch.torch_smddp # noqa: F401 torch.distributed.init_process_group(backend='''smddp''' , timeout=self.ddp_timeout_delta ) _UpperCAmelCase : Optional[int] = int(os.getenv('''SMDATAPARALLEL_LOCAL_RANK''' ) ) _UpperCAmelCase : List[str] = torch.device('''cuda''' , self.local_rank ) _UpperCAmelCase : Optional[Any] = 1 elif self.local_rank == -1: # if n_gpu is > 1 we'll use nn.DataParallel. # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will # trigger an error that a device index is missing. Index 0 takes into account the # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0` # will use the first GPU in that env, i.e. 
GPU#1 _UpperCAmelCase : List[str] = torch.device('''cuda:0''' if torch.cuda.is_available() else '''cpu''' ) # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at # the default value. _UpperCAmelCase : List[str] = torch.cuda.device_count() else: # Here, we'll use torch.distributed. # Initializes the distributed backend which will take care of synchronizing nodes/GPUs if not torch.distributed.is_initialized(): torch.distributed.init_process_group(backend='''nccl''' , timeout=self.ddp_timeout_delta ) _UpperCAmelCase : str = torch.device('''cuda''' , self.local_rank ) _UpperCAmelCase : Union[str, Any] = 1 if device.type == "cuda": torch.cuda.set_device(A ) return device @property def __lowerCAmelCase ( self ) -> List[str]: if is_sagemaker_model_parallel_available(): return smp.dp_size() return super().world_size @property def __lowerCAmelCase ( self ) -> str: return not is_sagemaker_model_parallel_available() @property def __lowerCAmelCase ( self ) -> List[Any]: return False
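# Hedged sketch of the environment that makes the detection above return True
# on a SageMaker model-parallel job; these values are illustrative stand-ins,
# not real SageMaker output, and `smdistributed` must also be importable.
import json
import os

os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
# With both variables set like this, is_sagemaker_model_parallel_available()
# returns True if and only if the `smdistributed` package is installed.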
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor _lowerCAmelCase :Optional[int] = logging.get_logger(__name__) class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , *A , **A ) -> None: warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , A , ) super().__init__(*A , **A )
"""simple docstring""" import warnings from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, optimal_fft_length, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging _lowerCAmelCase :Tuple = logging.get_logger(__name__) class _UpperCAmelCase ( a ): '''simple docstring''' a__ =['''input_values''', '''attention_mask'''] def __init__( self , A = 1 , A = 1_6_0_0_0 , A = 0.0 , A = False , A = 8_0 , A = 1_6 , A = 6_4 , A = "hann_window" , A = 1.0 , A = 8_0 , A = 7_6_0_0 , A = 1E-10 , A = 2 , A = True , **A , ) -> Any: super().__init__(feature_size=A , sampling_rate=A , padding_value=A , **A ) _UpperCAmelCase : str = do_normalize _UpperCAmelCase : Optional[int] = return_attention_mask _UpperCAmelCase : List[str] = num_mel_bins _UpperCAmelCase : str = hop_length _UpperCAmelCase : Dict = win_length _UpperCAmelCase : Tuple = win_function _UpperCAmelCase : Optional[int] = frame_signal_scale _UpperCAmelCase : Union[str, Any] = fmin _UpperCAmelCase : int = fmax _UpperCAmelCase : Any = mel_floor _UpperCAmelCase : Any = reduction_factor _UpperCAmelCase : List[Any] = win_length * sampling_rate // 1_0_0_0 _UpperCAmelCase : Any = hop_length * sampling_rate // 1_0_0_0 _UpperCAmelCase : Union[str, Any] = optimal_fft_length(self.sample_size ) _UpperCAmelCase : List[Any] = (self.n_fft // 2) + 1 _UpperCAmelCase : Optional[int] = window_function(window_length=self.sample_size , name=self.win_function , periodic=A ) _UpperCAmelCase : str = mel_filter_bank( num_frequency_bins=self.n_freqs , num_mel_filters=self.num_mel_bins , min_frequency=self.fmin , max_frequency=self.fmax , sampling_rate=self.sampling_rate , norm='''slaney''' , mel_scale='''slaney''' , ) if frame_signal_scale != 1.0: warnings.warn( '''The argument `frame_signal_scale` is deprecated and will be removed in version 4.30.0 of Transformers''' , A , ) if reduction_factor != 2.0: warnings.warn( '''The argument `reduction_factor` is deprecated and will be removed in version 4.30.0 of Transformers''' , A , ) @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __lowerCAmelCase ( A , A , A = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: _UpperCAmelCase : Optional[Any] = np.array(A , np.intaa ) _UpperCAmelCase : Optional[int] = [] for vector, length in zip(A , attention_mask.sum(-1 ) ): _UpperCAmelCase : Tuple = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: _UpperCAmelCase : Union[str, Any] = padding_value normed_input_values.append(A ) else: _UpperCAmelCase : List[str] = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __lowerCAmelCase ( self , A , ) -> np.ndarray: _UpperCAmelCase : Any = spectrogram( A , window=self.window , frame_length=self.sample_size , hop_length=self.sample_stride , fft_length=self.n_fft , mel_filters=self.mel_filters , mel_floor=self.mel_floor , log_mel='''log10''' , ) return log_mel_spec.T def __call__( self , A = None , A = None , A = False , A = None , A = False , A = None , A = None , A = None , A = None , **A , ) -> BatchFeature: if audio is None and audio_target is None: raise ValueError('''You must provide either `audio` or `audio_target` values.''' ) if sampling_rate is not None: if sampling_rate != 
self.sampling_rate: raise ValueError( f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' f' {self.sampling_rate} and not {sampling_rate}.' ) else: logger.warning( '''It is strongly recommended to pass the ``sampling_rate`` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) if audio is not None: _UpperCAmelCase : Optional[int] = self._process_audio( A , A , A , A , A , A , A , A , **A , ) else: _UpperCAmelCase : Optional[Any] = None if audio_target is not None: _UpperCAmelCase : Optional[Any] = self._process_audio( A , A , A , A , A , A , A , A , **A , ) if inputs is None: return inputs_target else: _UpperCAmelCase : Optional[Any] = inputs_target['''input_values'''] _UpperCAmelCase : str = inputs_target.get('''attention_mask''' ) if decoder_attention_mask is not None: _UpperCAmelCase : Optional[int] = decoder_attention_mask return inputs def __lowerCAmelCase ( self , A , A = False , A = False , A = None , A = False , A = None , A = None , A = None , **A , ) -> BatchFeature: _UpperCAmelCase : int = isinstance(A , np.ndarray ) and len(speech.shape ) > 1 if is_batched_numpy and len(speech.shape ) > 2: raise ValueError(f'Only mono-channel audio is supported for input to {self}' ) _UpperCAmelCase : Optional[Any] = is_batched_numpy or ( isinstance(A , (list, tuple) ) and (isinstance(speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: _UpperCAmelCase : Optional[Any] = [np.asarray(A , dtype=np.floataa ) for speech in speech] elif not is_batched and not isinstance(A , np.ndarray ): _UpperCAmelCase : List[Any] = np.asarray(A , dtype=np.floataa ) elif isinstance(A , np.ndarray ) and speech.dtype is np.dtype(np.floataa ): _UpperCAmelCase : Tuple = speech.astype(np.floataa ) # always return batch if not is_batched: _UpperCAmelCase : Union[str, Any] = [speech] # needed to make pad() work on spectrogram inputs _UpperCAmelCase : Optional[Any] = self.feature_size # convert into correct format for padding if is_target: _UpperCAmelCase : List[Any] = [self._extract_mel_features(A ) for waveform in speech] _UpperCAmelCase : Optional[Any] = BatchFeature({'''input_values''': features} ) _UpperCAmelCase : Any = self.num_mel_bins else: _UpperCAmelCase : Union[str, Any] = BatchFeature({'''input_values''': speech} ) _UpperCAmelCase : Tuple = self.pad( A , padding=A , max_length=A , truncation=A , pad_to_multiple_of=A , return_attention_mask=A , **A , ) _UpperCAmelCase : Optional[Any] = feature_size_hack # convert input values to correct format _UpperCAmelCase : Optional[int] = padded_inputs['''input_values'''] if not isinstance(input_values[0] , np.ndarray ): _UpperCAmelCase : int = [np.asarray(A , dtype=np.floataa ) for array in input_values] elif ( not isinstance(A , np.ndarray ) and isinstance(input_values[0] , np.ndarray ) and input_values[0].dtype is np.dtype(np.floataa ) ): _UpperCAmelCase : Optional[Any] = [array.astype(np.floataa ) for array in input_values] elif isinstance(A , np.ndarray ) and input_values.dtype is np.dtype(np.floataa ): _UpperCAmelCase : int = input_values.astype(np.floataa ) # convert attention_mask to correct format _UpperCAmelCase : Union[str, Any] = padded_inputs.get('''attention_mask''' ) if attention_mask is not None: _UpperCAmelCase : List[Any] = [np.asarray(A , dtype=np.intaa ) for array in attention_mask] # zero-mean and unit-variance normalization if not is_target and 
self.do_normalize: _UpperCAmelCase : Optional[int] = ( attention_mask if self._get_padding_strategies(A , max_length=A ) is not PaddingStrategy.DO_NOT_PAD else None ) _UpperCAmelCase : str = self.zero_mean_unit_var_norm( padded_inputs['''input_values'''] , attention_mask=A , padding_value=self.padding_value ) if return_tensors is not None: _UpperCAmelCase : Optional[Any] = padded_inputs.convert_to_tensors(A ) return padded_inputs def __lowerCAmelCase ( self ) -> Dict[str, Any]: _UpperCAmelCase : List[Any] = super().to_dict() # Don't serialize these as they are derived from the other properties. _UpperCAmelCase : Tuple = ['''window''', '''mel_filters''', '''sample_size''', '''sample_stride''', '''n_fft''', '''n_freqs'''] for name in names: if name in output: del output[name] return output
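# Standalone illustration of the zero-mean / unit-variance normalization
# implemented by the feature extractor above, on a toy waveform (the sample
# values are arbitrary):
import numpy as np

x = np.array([0.5, 1.0, 1.5, 2.0], dtype=np.float32)
normed = (x - x.mean()) / np.sqrt(x.var() + 1e-7)
# The normalized signal has (approximately) zero mean and unit variance:
assert abs(normed.mean()) < 1e-6
assert abs(normed.std() - 1.0) < 1e-3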
"""simple docstring""" import argparse import json import os import torch from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer from transformers.tokenization_utils_base import AddedToken @torch.no_grad() def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ): # Load configuration defined in the metadata file with open(UpperCamelCase__ ) as metadata_file: _UpperCAmelCase : Dict = json.load(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = LukeConfig(use_entity_aware_attention=UpperCamelCase__ , **metadata['''model_config'''] ) # Load in the weights from the checkpoint_path _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ , map_location='''cpu''' ) # Load the entity vocab file _UpperCAmelCase : Optional[int] = load_entity_vocab(UpperCamelCase__ ) _UpperCAmelCase : Optional[int] = RobertaTokenizer.from_pretrained(metadata['''model_config''']['''bert_model_name'''] ) # Add special tokens to the token vocabulary for downstream tasks _UpperCAmelCase : int = AddedToken('''<ent>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = AddedToken('''<ent2>''' , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) tokenizer.add_special_tokens({'''additional_special_tokens''': [entity_token_a, entity_token_a]} ) config.vocab_size += 2 print(F'Saving tokenizer to {pytorch_dump_folder_path}' ) tokenizer.save_pretrained(UpperCamelCase__ ) with open(os.path.join(UpperCamelCase__ , LukeTokenizer.vocab_files_names['''entity_vocab_file'''] ) , '''w''' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : Any = LukeTokenizer.from_pretrained(UpperCamelCase__ ) # Initialize the embeddings of the special tokens _UpperCAmelCase : str = state_dict['''embeddings.word_embeddings.weight'''] _UpperCAmelCase : Dict = word_emb[tokenizer.convert_tokens_to_ids(['''@'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Union[str, Any] = word_emb[tokenizer.convert_tokens_to_ids(['''#'''] )[0]].unsqueeze(0 ) _UpperCAmelCase : Tuple = torch.cat([word_emb, ent_emb, enta_emb] ) # Initialize the query layers of the entity-aware self-attention mechanism for layer_index in range(config.num_hidden_layers ): for matrix_name in ["query.weight", "query.bias"]: _UpperCAmelCase : List[Any] = F'encoder.layer.{layer_index}.attention.self.' _UpperCAmelCase : Optional[Any] = state_dict[prefix + matrix_name] _UpperCAmelCase : Tuple = state_dict[prefix + matrix_name] _UpperCAmelCase : str = state_dict[prefix + matrix_name] # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks _UpperCAmelCase : Any = state_dict['''entity_embeddings.entity_embeddings.weight'''] _UpperCAmelCase : Dict = entity_emb[entity_vocab['''[MASK]''']] _UpperCAmelCase : Optional[int] = LukeModel(config=UpperCamelCase__ ).eval() _UpperCAmelCase , _UpperCAmelCase : int = model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ ) if not (len(UpperCamelCase__ ) == 1 and missing_keys[0] == "embeddings.position_ids"): raise ValueError(F'Missing keys {", ".join(UpperCamelCase__ )}. 
Expected only missing embeddings.position_ids' )
    if not (all(key.startswith('''entity_predictions''' ) or key.startswith('''lm_head''' ) for key in unexpected_keys )):
        raise ValueError(
            '''Unexpected keys'''
            F' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(UpperCamelCase__ , task='''entity_classification''' )
    text = (
        '''Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'''
        ''' new world number one avoid a humiliating second- round exit at Wimbledon .'''
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='''pt''' )
    outputs = model(**encoding )

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024) )
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 768) )
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]] )
    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024) )
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 768) )
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]] )
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            F'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            F' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print('''Saving PyTorch model to {}'''.format(UpperCamelCase__ ) )
    model.save_pretrained(UpperCamelCase__ )


def load_entity_vocab(UpperCamelCase__ : Union[str, Any] ):
    entity_vocab = {}
    with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
        for index, line in enumerate(f ):
            title , _ = line.rstrip().split('''\t''' )
            entity_vocab[title] = index
    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument('--checkpoint_path', type=str, help='Path to a pytorch_model.bin file.')
    parser.add_argument(
        '--metadata_path', default=None, type=str, help='Path to a metadata.json file, defining the configuration.'
    )
    parser.add_argument(
        '--entity_vocab_path',
        default=None,
        type=str,
        help='Path to an entity_vocab.tsv file, containing the entity vocabulary.',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to where to dump the output PyTorch model.'
    )
    parser.add_argument(
        '--model_size', default='base', type=str, choices=['base', 'large'], help='Size of the model to be converted.'
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
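# Hedged sketch of the entity_vocab.tsv layout load_entity_vocab() above
# expects: one tab-separated "<entity>\t<value>" pair per line, with the line
# index becoming the entity id. "[MASK]" must be present (the conversion reads
# it); the other entity names and the second column here are illustrative.
sample = "[PAD]\t0\n[UNK]\t0\n[MASK]\t0\n"
entity_vocab = {line.split("\t")[0]: idx for idx, line in enumerate(sample.splitlines())}
assert entity_vocab["[MASK]"] == 2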
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowerCAmelCase :int = ['small', 'medium', 'large'] _lowerCAmelCase :int = 'lm_head.decoder.weight' _lowerCAmelCase :Dict = 'lm_head.weight' def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ): _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ ) _UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": _lowerCAmelCase :Dict = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) _lowerCAmelCase :str = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") _lowerCAmelCase :int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels _lowerCAmelCase :str = object() # For specifying empty leaf dict `{}` _lowerCAmelCase :str = object() def lowerCamelCase_ (UpperCamelCase__ : List[str] , UpperCamelCase__ : int ): _UpperCAmelCase : Dict = tuple((re.compile(x + '''$''' ) for x in qs) ) for i in range(len(UpperCamelCase__ ) - len(UpperCamelCase__ ) + 1 ): _UpperCAmelCase : str = [x.match(UpperCamelCase__ ) for x, y in zip(UpperCamelCase__ , ks[i:] )] if matches and all(UpperCamelCase__ ): return True return False def lowerCamelCase_ (UpperCamelCase__ : List[str] ): def replace(UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple ): for rule, replacement in rules: if _match(UpperCamelCase__ , UpperCamelCase__ ): return replacement return val return replace def lowerCamelCase_ (): return [ # embeddings (("transformer", "wpe", "embedding"), P('''mp''' , UpperCamelCase__ )), (("transformer", "wte", "embedding"), P('''mp''' , UpperCamelCase__ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("attention", "out_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(UpperCamelCase__ , '''mp''' )), (("mlp", "c_fc", "bias"), P('''mp''' )), (("mlp", "c_proj", "kernel"), P('''mp''' , UpperCamelCase__ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def lowerCamelCase_ (UpperCamelCase__ : str ): _UpperCAmelCase : List[str] = _get_partition_rules() _UpperCAmelCase : List[str] = _replacement_rules(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = {k: _unmatched for k in flatten_dict(UpperCamelCase__ )} _UpperCAmelCase : int = {k: replace(UpperCamelCase__ , UpperCamelCase__ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(UpperCamelCase__ ) )
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCAmelCase :Any = False @skip_mps class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ): '''simple docstring''' a__ =StableDiffusionAttendAndExcitePipeline a__ =False a__ =TEXT_TO_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) a__ =TEXT_TO_IMAGE_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __lowerCAmelCase ( cls ) -> List[str]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , ) _UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , ) torch.manual_seed(0 ) _UpperCAmelCase : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _UpperCAmelCase : List[str] = CLIPTextModel(A ) _UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCAmelCase : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: _UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : List[str] = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = '''cpu''' _UpperCAmelCase : Tuple = self.get_dummy_components() _UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Dict = self.get_dummy_inputs(A 
) _UpperCAmelCase : Union[str, Any] = pipe(**A ).images _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) _UpperCAmelCase : int = np.array( [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) _UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1E-3 ) def __lowerCAmelCase ( self ) -> Dict: super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> str: super().test_save_load_local(expected_max_difference=5E-4 ) def __lowerCAmelCase ( self ) -> Optional[int]: super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = torch.manual_seed(5_1 ) _UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses''' _UpperCAmelCase : int = [5, 7] _UpperCAmelCase : Dict = pipe( prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] _UpperCAmelCase : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
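# Usage sketch mirroring the slow test above (needs a CUDA GPU and a model
# download; token_indices selects which prompt tokens to "excite"):
#
# pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
# ).to("cuda")
# image = pipe(
#     prompt="a painting of an elephant with glasses",
#     token_indices=[5, 7],
#     guidance_scale=7.5,
#     num_inference_steps=50,  # the test uses 5 for speed; 50 is a typical value
# ).images[0]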
"""simple docstring""" import unittest from datasets import load_dataset from transformers.pipelines import pipeline from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow @is_pipeline_test @require_torch class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @require_torch def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : str = pipeline( task='''zero-shot-audio-classification''' , model='''hf-internal-testing/tiny-clap-htsat-unfused''' ) _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : str = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [{'''score''': 0.501, '''label''': '''Sound of a dog'''}, {'''score''': 0.499, '''label''': '''Sound of vaccum cleaner'''}] , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> Union[str, Any]: pass @slow @require_torch def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Union[str, Any] = pipeline( task='''zero-shot-audio-classification''' , model='''laion/clap-htsat-unfused''' , ) # This is an audio of a dog _UpperCAmelCase : List[Any] = load_dataset('''ashraq/esc50''' ) _UpperCAmelCase : Optional[int] = dataset['''train''']['''audio'''][-1]['''array'''] _UpperCAmelCase : Any = audio_classifier(A , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ] , ) _UpperCAmelCase : List[Any] = audio_classifier([audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) _UpperCAmelCase : Tuple = audio_classifier( [audio] * 5 , candidate_labels=['''Sound of a dog''', '''Sound of vaccum cleaner'''] , batch_size=5 ) self.assertEqual( nested_simplify(A ) , [ [ {'''score''': 0.999, '''label''': '''Sound of a dog'''}, {'''score''': 0.001, '''label''': '''Sound of vaccum cleaner'''}, ], ] * 5 , ) @unittest.skip('''No models are available in TF''' ) def __lowerCAmelCase ( self ) -> int: pass
"""simple docstring""" from __future__ import annotations import unittest from transformers import DistilBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.distilbert.modeling_tf_distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertModel, ) class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , ) -> Union[str, Any]: _UpperCAmelCase : str = parent _UpperCAmelCase : Optional[int] = 1_3 _UpperCAmelCase : List[str] = 7 _UpperCAmelCase : Union[str, Any] = True _UpperCAmelCase : Any = True _UpperCAmelCase : Dict = False _UpperCAmelCase : int = True _UpperCAmelCase : Optional[int] = 9_9 _UpperCAmelCase : Any = 3_2 _UpperCAmelCase : Dict = 2 _UpperCAmelCase : List[str] = 4 _UpperCAmelCase : Optional[int] = 3_7 _UpperCAmelCase : List[str] = '''gelu''' _UpperCAmelCase : int = 0.1 _UpperCAmelCase : Optional[Any] = 0.1 _UpperCAmelCase : Any = 5_1_2 _UpperCAmelCase : Union[str, Any] = 1_6 _UpperCAmelCase : Optional[int] = 2 _UpperCAmelCase : List[Any] = 0.02 _UpperCAmelCase : Dict = 3 _UpperCAmelCase : Any = 4 _UpperCAmelCase : Optional[int] = None def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase : str = None if self.use_input_mask: _UpperCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) _UpperCAmelCase : str = None _UpperCAmelCase : Dict = None _UpperCAmelCase : Union[str, Any] = None if self.use_labels: _UpperCAmelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) _UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.num_choices ) _UpperCAmelCase : str = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> List[str]: _UpperCAmelCase : Optional[Any] = TFDistilBertModel(config=A ) _UpperCAmelCase : str = {'''input_ids''': input_ids, '''attention_mask''': input_mask} _UpperCAmelCase : str = model(A ) _UpperCAmelCase : Optional[Any] = [input_ids, input_mask] _UpperCAmelCase : Optional[int] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Tuple: _UpperCAmelCase : Dict = TFDistilBertForMaskedLM(config=A ) _UpperCAmelCase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} _UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def 
__lowerCAmelCase ( self , A , A , A , A , A , A ) -> Optional[int]: _UpperCAmelCase : str = TFDistilBertForQuestionAnswering(config=A ) _UpperCAmelCase : str = { '''input_ids''': input_ids, '''attention_mask''': input_mask, } _UpperCAmelCase : Dict = model(A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Optional[int]: _UpperCAmelCase : List[Any] = self.num_labels _UpperCAmelCase : List[Any] = TFDistilBertForSequenceClassification(A ) _UpperCAmelCase : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask} _UpperCAmelCase : Optional[Any] = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> int: _UpperCAmelCase : Union[str, Any] = self.num_choices _UpperCAmelCase : Optional[Any] = TFDistilBertForMultipleChoice(A ) _UpperCAmelCase : str = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase : int = tf.tile(tf.expand_dims(A , 1 ) , (1, self.num_choices, 1) ) _UpperCAmelCase : Optional[Any] = { '''input_ids''': multiple_choice_inputs_ids, '''attention_mask''': multiple_choice_input_mask, } _UpperCAmelCase : Optional[int] = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCAmelCase ( self , A , A , A , A , A , A ) -> Optional[int]: _UpperCAmelCase : Union[str, Any] = self.num_labels _UpperCAmelCase : str = TFDistilBertForTokenClassification(A ) _UpperCAmelCase : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} _UpperCAmelCase : Any = model(A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ((_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase) , (_UpperCAmelCase)) : Any = config_and_inputs _UpperCAmelCase : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_tf class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =( ( TFDistilBertModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertForMultipleChoice, ) if is_tf_available() else None ) a__ =( { '''feature-extraction''': TFDistilBertModel, '''fill-mask''': TFDistilBertForMaskedLM, '''question-answering''': TFDistilBertForQuestionAnswering, '''text-classification''': TFDistilBertForSequenceClassification, '''token-classification''': TFDistilBertForTokenClassification, '''zero-shot''': TFDistilBertForSequenceClassification, } if is_tf_available() else {} ) a__ =False a__ =False def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = TFDistilBertModelTester(self ) _UpperCAmelCase : int = ConfigTester(self , config_class=A , dim=3_7 ) def __lowerCAmelCase ( self ) -> Any: self.config_tester.run_common_tests() def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_model(*A ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_distilbert_for_masked_lm(*A ) def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_question_answering(*A ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_sequence_classification(*A ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_multiple_choice(*A ) def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_distilbert_for_token_classification(*A ) @slow def __lowerCAmelCase ( self ) -> Optional[Any]: for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ): _UpperCAmelCase : int = TFDistilBertModel.from_pretrained(A ) self.assertIsNotNone(A ) @require_tf class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @slow def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[Any] = TFDistilBertModel.from_pretrained('''distilbert-base-uncased''' ) _UpperCAmelCase : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] ) _UpperCAmelCase : Any = model(A )[0] _UpperCAmelCase : Optional[Any] = [1, 6, 7_6_8] self.assertEqual(output.shape , A ) _UpperCAmelCase : int = tf.constant( [ [ [0.19_261_885, -0.13_732_955, 0.4_119_799], [0.22_150_156, -0.07_422_661, 0.39_037_204], [0.22_756_018, -0.0_896_414, 0.3_701_467], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , A , atol=1E-4 )
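# Quick usage sketch matching the integration test above:
#
# model = TFDistilBertModel.from_pretrained("distilbert-base-uncased")
# hidden = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # shape (1, 6, 768)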
"""simple docstring""" import inspect import logging import os import random import shutil import tempfile import unittest import pytest import torch from torch import nn from torch.utils.data import DataLoader, TensorDataset from accelerate import Accelerator from accelerate.test_utils import execute_subprocess_async, require_cuda from accelerate.utils import ProjectConfiguration, set_seed _lowerCAmelCase :Tuple = logging.getLogger(__name__) def lowerCamelCase_ (UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int = 10 , UpperCamelCase__ : int = 2 ): def get_dataset(UpperCamelCase__ : List[str] ): _UpperCAmelCase : Optional[Any] = torch.randn(batch_size * n_batches , 1 ) return TensorDataset(UpperCamelCase__ , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1 ) ) _UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = get_dataset(UpperCamelCase__ ) _UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 ) _UpperCAmelCase : List[str] = DataLoader(UpperCamelCase__ , shuffle=UpperCamelCase__ , batch_size=UpperCamelCase__ , num_workers=4 ) return (train_dataloader, valid_dataloader) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=None ): _UpperCAmelCase : Tuple = [] for epoch in range(UpperCamelCase__ ): # Train quickly model.train() for batch in dataloader: _UpperCAmelCase , _UpperCAmelCase : Dict = batch _UpperCAmelCase : int = model(UpperCamelCase__ ) _UpperCAmelCase : Dict = torch.nn.functional.mse_loss(UpperCamelCase__ , UpperCamelCase__ ) accelerator.backward(UpperCamelCase__ ) optimizer.step() optimizer.zero_grad() rands.append(random.random() ) # Introduce some randomness if scheduler is not None: scheduler.step() return rands class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self ) -> List[Any]: super().__init__() _UpperCAmelCase : List[Any] = nn.Parameter(torch.randn(1 ) ) _UpperCAmelCase : int = nn.Parameter(torch.randn(1 ) ) def __lowerCAmelCase ( self , A ) -> Tuple: return x * self.a + self.b class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> Any: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : int = DummyModel() _UpperCAmelCase : str = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders() _UpperCAmelCase : Any = ProjectConfiguration(total_limit=1 , project_dir=A , automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : Union[str, Any] = Accelerator(project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( A , A , A , A ) # Save initial accelerator.save_state() # Save second state accelerator.save_state() self.assertEqual(len(os.listdir(accelerator.project_dir ) ) , 1 ) def __lowerCAmelCase ( self ) -> List[str]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : Optional[Any] = DummyModel() _UpperCAmelCase : int = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Dict = dummy_dataloaders() # Train baseline _UpperCAmelCase : Optional[int] = Accelerator() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : 
List[str] = accelerator.prepare( A , A , A , A ) # Save initial _UpperCAmelCase : Union[str, Any] = os.path.join(A , '''initial''' ) accelerator.save_state(A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Optional[Any] = model.a.item(), model.b.item() _UpperCAmelCase : str = optimizer.state_dict() _UpperCAmelCase : Tuple = train(3 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : List[Any] = optimizer.state_dict() # Train partially set_seed(4_2 ) _UpperCAmelCase : Dict = DummyModel() _UpperCAmelCase : Optional[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = dummy_dataloaders() _UpperCAmelCase : Tuple = Accelerator() _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare( A , A , A , A ) accelerator.load_state(A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : List[str] = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) _UpperCAmelCase : Union[str, Any] = train(2 , A , A , A , A ) # Save everything _UpperCAmelCase : List[str] = os.path.join(A , '''checkpoint''' ) accelerator.save_state(A ) # Load everything back in and make sure all states work accelerator.load_state(A ) test_rands += train(1 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : Dict = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> int: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : List[Any] = DummyModel() _UpperCAmelCase : List[str] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : List[Any] = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : str = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Any = accelerator.prepare( A , A , A , A ) # Save initial accelerator.save_state() ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : Dict = optimizer.state_dict() _UpperCAmelCase : int = train(3 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = model.a.item(), model.b.item() _UpperCAmelCase : Union[str, Any] = optimizer.state_dict() # Train partially set_seed(4_2 ) _UpperCAmelCase : List[Any] = DummyModel() _UpperCAmelCase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase , _UpperCAmelCase : Any = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(iteration=1 , automatic_checkpoint_naming=A ) _UpperCAmelCase : Tuple = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : int = accelerator.prepare( A , A , A , A ) accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : Dict = model.a.item(), model.b.item() _UpperCAmelCase : str = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) _UpperCAmelCase : List[str] = train(2 , A , A , A , A ) # Save everything accelerator.save_state() # Load everything back in and make sure all states work 
accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_1''' ) ) test_rands += train(1 , A , A , A , A ) ((_UpperCAmelCase) , (_UpperCAmelCase)) : List[str] = model.a.item(), model.b.item() _UpperCAmelCase : Tuple = optimizer.state_dict() self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) self.assertEqual(A , A ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[Any] = torch.tensor([1, 2, 3] ) _UpperCAmelCase : List[str] = torch.tensor([2, 3, 4] ) _UpperCAmelCase : Optional[int] = DummyModel() _UpperCAmelCase : Dict = torch.optim.Adam(net.parameters() ) _UpperCAmelCase : Optional[int] = Accelerator() with self.assertRaises(A ) as ve: accelerator.register_for_checkpointing(A , A , A , A ) _UpperCAmelCase : Dict = str(ve.exception ) self.assertTrue('''Item at index 0''' in message ) self.assertTrue('''Item at index 1''' in message ) self.assertFalse('''Item at index 2''' in message ) self.assertFalse('''Item at index 3''' in message ) def __lowerCAmelCase ( self ) -> Tuple: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : Tuple = DummyModel() _UpperCAmelCase : List[Any] = torch.optim.Adam(params=model.parameters() , lr=1E-3 ) _UpperCAmelCase : Optional[int] = torch.optim.lr_scheduler.StepLR(A , step_size=1 , gamma=0.99 ) _UpperCAmelCase , _UpperCAmelCase : str = dummy_dataloaders() _UpperCAmelCase : List[str] = ProjectConfiguration(automatic_checkpoint_naming=A ) # Train baseline _UpperCAmelCase : int = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = accelerator.prepare( A , A , A , A , A ) # Save initial accelerator.save_state() _UpperCAmelCase : List[str] = scheduler.state_dict() train(3 , A , A , A , A , A ) self.assertNotEqual(A , scheduler.state_dict() ) # Load everything back in and make sure all states work accelerator.load_state(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) self.assertEqual(A , scheduler.state_dict() ) def __lowerCAmelCase ( self ) -> Optional[Any]: with tempfile.TemporaryDirectory() as tmpdir: set_seed(4_2 ) _UpperCAmelCase : int = DummyModel() _UpperCAmelCase : str = ProjectConfiguration(automatic_checkpoint_naming=A , total_limit=2 ) # Train baseline _UpperCAmelCase : Union[str, Any] = Accelerator(project_dir=A , project_config=A ) _UpperCAmelCase : Optional[Any] = accelerator.prepare(A ) # Save 3 states: for _ in range(1_1 ): accelerator.save_state() self.assertTrue(not os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_0''' ) ) ) self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_9''' ) ) ) self.assertTrue(os.path.exists(os.path.join(A , '''checkpoints''' , '''checkpoint_10''' ) ) ) @require_cuda def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : str = ['''torchrun''', f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )] execute_subprocess_async(A , env=os.environ.copy() ) if __name__ == "__main__": _lowerCAmelCase :Dict = '/tmp/accelerate/state_checkpointing' _lowerCAmelCase :Any = DummyModel() _lowerCAmelCase :Tuple = torch.optim.Adam(params=model.parameters(), lr=1E-3) _lowerCAmelCase :Dict = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99) _lowerCAmelCase,_lowerCAmelCase :Any = dummy_dataloaders() _lowerCAmelCase :Tuple = ProjectConfiguration(automatic_checkpoint_naming=True) # Train baseline _lowerCAmelCase :Optional[Any] = Accelerator(project_dir=savedir, 
project_config=project_config, mixed_precision='no') if accelerator.process_index == 0: if os.path.exists(savedir): shutil.rmtree(savedir) os.makedirs(savedir) _lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase,_lowerCAmelCase :str = accelerator.prepare( model, optimizer, train_dataloader, valid_dataloader, scheduler ) _lowerCAmelCase,_lowerCAmelCase :List[Any] = accelerator.prepare(model, optimizer) train(3, model, train_dataloader, optimizer, accelerator, scheduler) # Check that the intial optimizer is loaded on the GPU for group in optimizer.param_groups: _lowerCAmelCase :int = group['params'][0].device break assert param_device.type == accelerator.device.type _lowerCAmelCase :Dict = model.cpu() accelerator.wait_for_everyone() accelerator.save_state() accelerator.wait_for_everyone() # Check CPU state accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu') for group in optimizer.param_groups: _lowerCAmelCase :List[Any] = group['params'][0].device break assert ( param_device.type == torch.device('cpu').type ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}" # Check device state model.to(accelerator.device) accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device') for group in optimizer.param_groups: _lowerCAmelCase :Union[str, Any] = group['params'][0].device break assert ( param_device.type == accelerator.device.type ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}" # Check error with pytest.raises(TypeError, match='Unsupported optimizer map location passed'): accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid') accelerator.wait_for_everyone() if accelerator.process_index == 0: shutil.rmtree(savedir) accelerator.wait_for_everyone()
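# The checkpointing pattern the tests above exercise, in its minimal form
# (directory names are illustrative):
#
# accelerator = Accelerator(
#     project_dir="runs", project_config=ProjectConfiguration(automatic_checkpoint_naming=True)
# )
# model, optimizer, loader = accelerator.prepare(model, optimizer, loader)
# accelerator.save_state()                                 # -> runs/checkpoints/checkpoint_0
# accelerator.load_state("runs/checkpoints/checkpoint_0")  # restores model/optimizer/RNG state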
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast @require_vision class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : int = tempfile.mkdtemp() _UpperCAmelCase : int = BlipImageProcessor() _UpperCAmelCase : Optional[int] = BertTokenizer.from_pretrained('''hf-internal-testing/tiny-random-BertModel''' ) _UpperCAmelCase : Union[str, Any] = BlipProcessor(A , A ) processor.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self , **A ) -> Any: return AutoProcessor.from_pretrained(self.tmpdirname , **A ).tokenizer def __lowerCAmelCase ( self , **A ) -> List[str]: return AutoProcessor.from_pretrained(self.tmpdirname , **A ).image_processor def __lowerCAmelCase ( self ) -> Dict: shutil.rmtree(self.tmpdirname ) def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : str = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] _UpperCAmelCase : int = [Image.fromarray(np.moveaxis(A , 0 , -1 ) ) for x in image_inputs] return image_inputs def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : int = BlipProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) _UpperCAmelCase : Optional[Any] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) _UpperCAmelCase : Optional[int] = self.get_image_processor(do_normalize=A , padding_value=1.0 ) _UpperCAmelCase : Optional[int] = BlipProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , A ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , A ) def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : int = self.get_image_processor() _UpperCAmelCase : Tuple = self.get_tokenizer() _UpperCAmelCase : List[str] = BlipProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : int = self.prepare_image_inputs() _UpperCAmelCase : int = image_processor(A , return_tensors='''np''' ) _UpperCAmelCase : List[Any] = processor(images=A , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : Optional[Any] = self.get_image_processor() _UpperCAmelCase : int = self.get_tokenizer() _UpperCAmelCase : List[Any] = BlipProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : Dict = '''lower newer''' _UpperCAmelCase : Union[str, Any] = processor(text=A ) _UpperCAmelCase : List[Any] = tokenizer(A , return_token_type_ids=A ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Any = self.get_image_processor() _UpperCAmelCase : List[str] = self.get_tokenizer() _UpperCAmelCase : List[str] = BlipProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : int = '''lower newer''' _UpperCAmelCase : List[str] = 
self.prepare_image_inputs() _UpperCAmelCase : Union[str, Any] = processor(text=A , images=A ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] ) # test if it raises when no input is passed with pytest.raises(A ): processor() def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : int = self.get_image_processor() _UpperCAmelCase : str = self.get_tokenizer() _UpperCAmelCase : List[Any] = BlipProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] _UpperCAmelCase : List[Any] = processor.batch_decode(A ) _UpperCAmelCase : Union[str, Any] = tokenizer.batch_decode(A ) self.assertListEqual(A , A ) def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Union[str, Any] = self.get_image_processor() _UpperCAmelCase : int = self.get_tokenizer() _UpperCAmelCase : List[str] = BlipProcessor(tokenizer=A , image_processor=A ) _UpperCAmelCase : Optional[Any] = '''lower newer''' _UpperCAmelCase : str = self.prepare_image_inputs() _UpperCAmelCase : Union[str, Any] = processor(text=A , images=A ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''input_ids''', '''attention_mask'''] )
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class _UpperCAmelCase : '''simple docstring''' def __init__( self , A ) -> Union[str, Any]: _UpperCAmelCase : Any = data _UpperCAmelCase : Node | None = None class _UpperCAmelCase : '''simple docstring''' def __init__( self ) -> Optional[int]: _UpperCAmelCase : Any = None _UpperCAmelCase : List[str] = None def __iter__( self ) -> Iterator[Any]: _UpperCAmelCase : Tuple = self.head while self.head: yield node.data _UpperCAmelCase : str = node.next if node == self.head: break def __len__( self ) -> int: return sum(1 for _ in self ) def __repr__( self ) -> str: return "->".join(str(A ) for item in iter(self ) ) def __lowerCAmelCase ( self , A ) -> None: self.insert_nth(len(self ) , A ) def __lowerCAmelCase ( self , A ) -> None: self.insert_nth(0 , A ) def __lowerCAmelCase ( self , A , A ) -> None: if index < 0 or index > len(self ): raise IndexError('''list index out of range.''' ) _UpperCAmelCase : Optional[Any] = Node(A ) if self.head is None: _UpperCAmelCase : List[Any] = new_node # first node points itself _UpperCAmelCase : List[str] = new_node elif index == 0: # insert at head _UpperCAmelCase : Union[str, Any] = self.head _UpperCAmelCase : Any = new_node else: _UpperCAmelCase : Union[str, Any] = self.head for _ in range(index - 1 ): _UpperCAmelCase : List[Any] = temp.next _UpperCAmelCase : List[str] = temp.next _UpperCAmelCase : Optional[int] = new_node if index == len(self ) - 1: # insert at tail _UpperCAmelCase : List[str] = new_node def __lowerCAmelCase ( self ) -> Tuple: return self.delete_nth(0 ) def __lowerCAmelCase ( self ) -> Any: return self.delete_nth(len(self ) - 1 ) def __lowerCAmelCase ( self , A = 0 ) -> Any: if not 0 <= index < len(self ): raise IndexError('''list index out of range.''' ) _UpperCAmelCase : Optional[int] = self.head if self.head == self.tail: # just one node _UpperCAmelCase : str = None elif index == 0: # delete head node _UpperCAmelCase : Optional[int] = self.tail.next.next _UpperCAmelCase : str = self.head.next else: _UpperCAmelCase : List[Any] = self.head for _ in range(index - 1 ): _UpperCAmelCase : Tuple = temp.next _UpperCAmelCase : Optional[int] = temp.next _UpperCAmelCase : Tuple = temp.next.next if index == len(self ) - 1: # delete at tail _UpperCAmelCase : List[str] = temp return delete_node.data def __lowerCAmelCase ( self ) -> bool: return len(self ) == 0 def lowerCamelCase_ (): _UpperCAmelCase : str = CircularLinkedList() assert len(UpperCamelCase__ ) == 0 assert circular_linked_list.is_empty() is True assert str(UpperCamelCase__ ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert len(UpperCamelCase__ ) == i circular_linked_list.insert_nth(UpperCamelCase__ , i + 1 ) assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(UpperCamelCase__ 
) == "->".join(str(UpperCamelCase__ ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(UpperCamelCase__ ) == "->".join(str(UpperCamelCase__ ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import random import unittest import torch from diffusers import IFImgaImgSuperResolutionPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class _UpperCAmelCase ( a ,a ,unittest.TestCase ): '''simple docstring''' a__ =IFImgaImgSuperResolutionPipeline a__ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''width''', '''height'''} a__ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''original_image'''} ) a__ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __lowerCAmelCase ( self ) -> List[str]: return self._get_superresolution_dummy_components() def __lowerCAmelCase ( self , A , A=0 ) -> Union[str, Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Any = torch.manual_seed(A ) else: _UpperCAmelCase : int = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : str = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : Dict = floats_tensor((1, 3, 1_6, 1_6) , rng=random.Random(A ) ).to(A ) _UpperCAmelCase : List[Any] = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''original_image''': original_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , ) def __lowerCAmelCase ( self ) -> List[Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) def __lowerCAmelCase ( self ) -> List[str]: self._test_save_load_optional_components() @unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1E-1 ) def __lowerCAmelCase ( self ) -> int: self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_save_load_local() def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical( expected_max_diff=1E-2 , )
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ): if a < 0 or b < 0: raise ValueError('''the value of both inputs must be positive''' ) _UpperCAmelCase : List[str] = str(bin(UpperCamelCase__ ) )[2:] # remove the leading "0b" _UpperCAmelCase : str = str(bin(UpperCamelCase__ ) )[2:] _UpperCAmelCase : List[str] = max(len(UpperCamelCase__ ) , len(UpperCamelCase__ ) ) return "0b" + "".join( str(int('''1''' in (char_a, char_b) ) ) for char_a, char_b in zip(a_binary.zfill(UpperCamelCase__ ) , b_binary.zfill(UpperCamelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def lowerCamelCase_ (UpperCamelCase__ : int ): if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) _UpperCAmelCase : str = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import unittest import numpy as np import torch from torch import nn from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler from diffusers.utils import torch_device from diffusers.utils.testing_utils import enable_full_determinism, skip_mps from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _UpperCAmelCase ( a ,unittest.TestCase ): '''simple docstring''' a__ =KandinskyVaaPriorPipeline a__ =['''prompt'''] a__ =['''prompt''', '''negative_prompt'''] a__ =[ '''num_images_per_prompt''', '''generator''', '''num_inference_steps''', '''latents''', '''negative_prompt''', '''guidance_scale''', '''output_type''', '''return_dict''', ] a__ =False @property def __lowerCAmelCase ( self ) -> List[str]: return 3_2 @property def __lowerCAmelCase ( self ) -> Tuple: return 3_2 @property def __lowerCAmelCase ( self ) -> Union[str, Any]: return self.time_input_dim @property def __lowerCAmelCase ( self ) -> List[Any]: return self.time_input_dim * 4 @property def __lowerCAmelCase ( self ) -> Any: return 1_0_0 @property def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) return tokenizer @property def __lowerCAmelCase ( self ) -> List[str]: torch.manual_seed(0 ) _UpperCAmelCase : Any = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) return CLIPTextModelWithProjection(A ) @property def __lowerCAmelCase ( self ) -> Any: torch.manual_seed(0 ) _UpperCAmelCase : Any = { '''num_attention_heads''': 2, '''attention_head_dim''': 1_2, '''embedding_dim''': self.text_embedder_hidden_size, '''num_layers''': 1, } _UpperCAmelCase : Tuple = PriorTransformer(**A ) # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0 _UpperCAmelCase : Any = nn.Parameter(torch.ones(model.clip_std.shape ) ) return model @property def __lowerCAmelCase ( self ) -> Optional[int]: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=2_2_4 , projection_dim=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1_4 , ) _UpperCAmelCase : Optional[Any] = CLIPVisionModelWithProjection(A ) return model @property def __lowerCAmelCase ( self ) -> Tuple: _UpperCAmelCase : Tuple = CLIPImageProcessor( crop_size=2_2_4 , do_center_crop=A , do_normalize=A , do_resize=A , image_mean=[0.48_145_466, 0.4_578_275, 0.40_821_073] , image_std=[0.26_862_954, 0.26_130_258, 0.27_577_711] , resample=3 , size=2_2_4 , ) return image_processor def __lowerCAmelCase ( self ) -> Union[str, Any]: _UpperCAmelCase : Optional[int] = self.dummy_prior _UpperCAmelCase : List[str] = self.dummy_image_encoder _UpperCAmelCase : Tuple = self.dummy_text_encoder _UpperCAmelCase : int = self.dummy_tokenizer _UpperCAmelCase : int = self.dummy_image_processor _UpperCAmelCase : Optional[int] = UnCLIPScheduler( variance_type='''fixed_small_log''' , prediction_type='''sample''' , 
num_train_timesteps=1_0_0_0 , clip_sample=A , clip_sample_range=10.0 , ) _UpperCAmelCase : List[Any] = { '''prior''': prior, '''image_encoder''': image_encoder, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''scheduler''': scheduler, '''image_processor''': image_processor, } return components def __lowerCAmelCase ( self , A , A=0 ) -> List[str]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Optional[Any] = torch.manual_seed(A ) else: _UpperCAmelCase : str = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : Dict = { '''prompt''': '''horse''', '''generator''': generator, '''guidance_scale''': 4.0, '''num_inference_steps''': 2, '''output_type''': '''np''', } return inputs def __lowerCAmelCase ( self ) -> Any: _UpperCAmelCase : int = '''cpu''' _UpperCAmelCase : Tuple = self.get_dummy_components() _UpperCAmelCase : int = self.pipeline_class(**A ) _UpperCAmelCase : Dict = pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Union[str, Any] = pipe(**self.get_dummy_inputs(A ) ) _UpperCAmelCase : Dict = output.image_embeds _UpperCAmelCase : Optional[Any] = pipe( **self.get_dummy_inputs(A ) , return_dict=A , )[0] _UpperCAmelCase : Union[str, Any] = image[0, -1_0:] _UpperCAmelCase : Optional[int] = image_from_tuple[0, -1_0:] assert image.shape == (1, 3_2) _UpperCAmelCase : str = np.array( [-0.0_532, 1.7_120, 0.3_656, -1.0_852, -0.8_946, -1.1_756, 0.4_348, 0.2_482, 0.5_146, -0.1_156] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : Optional[int] = torch_device == '''cpu''' _UpperCAmelCase : Tuple = True _UpperCAmelCase : List[str] = False self._test_inference_batch_single_identical( test_max_difference=A , relax_max_difference=A , test_mean_pixel_difference=A , ) @skip_mps def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Optional[int] = torch_device == '''cpu''' _UpperCAmelCase : Union[str, Any] = False self._test_attention_slicing_forward_pass( test_max_difference=A , test_mean_pixel_difference=A , )
"""simple docstring""" import argparse import OmegaConf import torch from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel def lowerCamelCase_ (UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[Any] ): _UpperCAmelCase : int = OmegaConf.load(UpperCamelCase__ ) _UpperCAmelCase : str = torch.load(UpperCamelCase__ , map_location='''cpu''' )['''model'''] _UpperCAmelCase : Optional[Any] = list(state_dict.keys() ) # extract state_dict for VQVAE _UpperCAmelCase : Any = {} _UpperCAmelCase : Any = '''first_stage_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] # extract state_dict for UNetLDM _UpperCAmelCase : Tuple = {} _UpperCAmelCase : int = '''model.diffusion_model.''' for key in keys: if key.startswith(UpperCamelCase__ ): _UpperCAmelCase : Dict = state_dict[key] _UpperCAmelCase : List[str] = config.model.params.first_stage_config.params _UpperCAmelCase : Union[str, Any] = config.model.params.unet_config.params _UpperCAmelCase : Any = VQModel(**UpperCamelCase__ ).eval() vqvae.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : Union[str, Any] = UNetLDMModel(**UpperCamelCase__ ).eval() unet.load_state_dict(UpperCamelCase__ ) _UpperCAmelCase : int = DDIMScheduler( timesteps=config.model.params.timesteps , beta_schedule='''scaled_linear''' , beta_start=config.model.params.linear_start , beta_end=config.model.params.linear_end , clip_sample=UpperCamelCase__ , ) _UpperCAmelCase : Optional[Any] = LDMPipeline(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) pipeline.save_pretrained(UpperCamelCase__ ) if __name__ == "__main__": _lowerCAmelCase :Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--checkpoint_path', type=str, required=True) parser.add_argument('--config_path', type=str, required=True) parser.add_argument('--output_path', type=str, required=True) _lowerCAmelCase :List[Any] = parser.parse_args() convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig _lowerCAmelCase :int = [ 'openmmlab/upernet-convnext-tiny', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring _lowerCAmelCase :Tuple = 'UperNetConfig' class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , A , A , A , A = 0 , A = False , A = 1 , ) -> None: super().__init__() _UpperCAmelCase : Optional[Any] = nn.Convad( in_channels=A , out_channels=A , kernel_size=A , padding=A , bias=A , dilation=A , ) _UpperCAmelCase : List[str] = nn.BatchNormad(A ) _UpperCAmelCase : Optional[Any] = nn.ReLU() def __lowerCAmelCase ( self , A ) -> torch.Tensor: _UpperCAmelCase : List[Any] = self.conv(A ) _UpperCAmelCase : Optional[Any] = self.batch_norm(A ) _UpperCAmelCase : Union[str, Any] = self.activation(A ) return output class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , A , A , A ) -> None: super().__init__() _UpperCAmelCase : str = [ nn.AdaptiveAvgPoolad(A ), UperNetConvModule(A , A , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(A ) , A ) def __lowerCAmelCase ( self , A ) -> torch.Tensor: _UpperCAmelCase : Optional[int] = input for layer in self.layers: _UpperCAmelCase : Any = layer(A ) return hidden_state class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , A , A , A , A ) -> None: super().__init__() _UpperCAmelCase : str = pool_scales _UpperCAmelCase : Optional[int] = align_corners _UpperCAmelCase : int = in_channels _UpperCAmelCase : Union[str, Any] = channels _UpperCAmelCase : Tuple = [] for i, pool_scale in enumerate(A ): _UpperCAmelCase : int = UperNetPyramidPoolingBlock(pool_scale=A , in_channels=A , channels=A ) self.blocks.append(A ) self.add_module(str(A ) , A ) def __lowerCAmelCase ( self , A ) -> List[torch.Tensor]: _UpperCAmelCase : Tuple = [] for ppm in self.blocks: _UpperCAmelCase : int = ppm(A ) _UpperCAmelCase : int = nn.functional.interpolate( A , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners ) ppm_outs.append(A ) return ppm_outs class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , A , A ) -> Union[str, Any]: super().__init__() _UpperCAmelCase : List[Any] = config _UpperCAmelCase : Optional[int] = config.pool_scales # e.g. 
(1, 2, 3, 6) _UpperCAmelCase : int = in_channels _UpperCAmelCase : int = config.hidden_size _UpperCAmelCase : int = False _UpperCAmelCase : Optional[int] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module _UpperCAmelCase : int = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) _UpperCAmelCase : str = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module _UpperCAmelCase : Optional[int] = nn.ModuleList() _UpperCAmelCase : str = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer _UpperCAmelCase : Tuple = UperNetConvModule(A , self.channels , kernel_size=1 ) _UpperCAmelCase : Dict = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(A ) self.fpn_convs.append(A ) _UpperCAmelCase : Optional[Any] = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def __lowerCAmelCase ( self ) -> Dict: self.apply(self._init_weights ) def __lowerCAmelCase ( self , A ) -> Dict: if isinstance(A , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def __lowerCAmelCase ( self , A ) -> Dict: _UpperCAmelCase : List[str] = inputs[-1] _UpperCAmelCase : Any = [x] psp_outs.extend(self.psp_modules(A ) ) _UpperCAmelCase : Union[str, Any] = torch.cat(A , dim=1 ) _UpperCAmelCase : Optional[Any] = self.bottleneck(A ) return output def __lowerCAmelCase ( self , A ) -> torch.Tensor: # build laterals _UpperCAmelCase : Tuple = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(A ) ) # build top-down path _UpperCAmelCase : Tuple = len(A ) for i in range(used_backbone_levels - 1 , 0 , -1 ): _UpperCAmelCase : Dict = laterals[i - 1].shape[2:] _UpperCAmelCase : Union[str, Any] = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=A , mode='''bilinear''' , align_corners=self.align_corners ) # build outputs _UpperCAmelCase : List[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): _UpperCAmelCase : Optional[int] = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners ) _UpperCAmelCase : Tuple = torch.cat(A , dim=1 ) _UpperCAmelCase : Tuple = self.fpn_bottleneck(A ) _UpperCAmelCase : Dict = self.classifier(A ) return output class _UpperCAmelCase ( nn.Module ): '''simple docstring''' def __init__( self , A , A = 2 , A = 3 , A = 1 ) -> None: super().__init__() _UpperCAmelCase : int = config _UpperCAmelCase : str = config.auxiliary_in_channels _UpperCAmelCase : Optional[int] = config.auxiliary_channels _UpperCAmelCase : List[Any] = config.auxiliary_num_convs _UpperCAmelCase : Optional[int] = config.auxiliary_concat_input _UpperCAmelCase : List[str] = in_index _UpperCAmelCase : Optional[Any] = (kernel_size // 2) * dilation _UpperCAmelCase : Tuple = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=A , padding=A , dilation=A ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=A , padding=A , dilation=A ) ) if self.num_convs == 0: _UpperCAmelCase : 
Optional[int] = nn.Identity() else: _UpperCAmelCase : int = nn.Sequential(*A ) if self.concat_input: _UpperCAmelCase : int = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=A , padding=kernel_size // 2 ) _UpperCAmelCase : Optional[Any] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def __lowerCAmelCase ( self ) -> Optional[Any]: self.apply(self._init_weights ) def __lowerCAmelCase ( self , A ) -> List[str]: if isinstance(A , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def __lowerCAmelCase ( self , A ) -> torch.Tensor: # just take the relevant feature maps _UpperCAmelCase : int = encoder_hidden_states[self.in_index] _UpperCAmelCase : Tuple = self.convs(A ) if self.concat_input: _UpperCAmelCase : Optional[int] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) _UpperCAmelCase : int = self.classifier(A ) return output class _UpperCAmelCase ( a ): '''simple docstring''' a__ =UperNetConfig a__ ='''pixel_values''' a__ =True def __lowerCAmelCase ( self , A ) -> Optional[int]: if isinstance(A , A ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def __lowerCAmelCase ( self ) -> Optional[int]: self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def __lowerCAmelCase ( self , A , A=False ) -> Optional[Any]: if isinstance(A , A ): _UpperCAmelCase : int = value _lowerCAmelCase :str = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' _lowerCAmelCase :List[Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( '''UperNet framework leveraging any vision backbone e.g. 
for ADE20k, CityScapes.''' ,a ,) class _UpperCAmelCase ( a ): '''simple docstring''' def __init__( self , A ) -> Tuple: super().__init__(A ) _UpperCAmelCase : str = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) _UpperCAmelCase : List[Any] = UperNetHead(A , in_channels=self.backbone.channels ) _UpperCAmelCase : Optional[int] = UperNetFCNHead(A ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) ) @replace_return_docstrings(output_type=A , config_class=_CONFIG_FOR_DOC ) def __lowerCAmelCase ( self , A = None , A = None , A = None , A = None , A = None , ) -> Union[tuple, SemanticSegmenterOutput]: _UpperCAmelCase : Dict = return_dict if return_dict is not None else self.config.use_return_dict _UpperCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) _UpperCAmelCase : Optional[int] = output_attentions if output_attentions is not None else self.config.output_attentions _UpperCAmelCase : int = self.backbone.forward_with_filtered_kwargs( A , output_hidden_states=A , output_attentions=A ) _UpperCAmelCase : List[str] = outputs.feature_maps _UpperCAmelCase : Dict = self.decode_head(A ) _UpperCAmelCase : Optional[Any] = nn.functional.interpolate(A , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=A ) _UpperCAmelCase : Union[str, Any] = None if self.auxiliary_head is not None: _UpperCAmelCase : Tuple = self.auxiliary_head(A ) _UpperCAmelCase : str = nn.functional.interpolate( A , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=A ) _UpperCAmelCase : int = None if labels is not None: if self.config.num_labels == 1: raise ValueError('''The number of labels should be greater than one''' ) else: # compute weighted loss _UpperCAmelCase : Any = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) _UpperCAmelCase : str = loss_fct(A , A ) _UpperCAmelCase : Optional[Any] = loss_fct(A , A ) _UpperCAmelCase : str = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: _UpperCAmelCase : str = (logits,) + outputs[1:] else: _UpperCAmelCase : Union[str, Any] = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=A , logits=A , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :List[str] = logging.get_logger(__name__) _lowerCAmelCase :Any = { 'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json', 'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''falcon''' a__ =['''past_key_values'''] def __init__( self , A=6_5_0_2_4 , A=4_5_4_4 , A=3_2 , A=7_1 , A=1E-5 , A=0.02 , A=True , A=0.0 , A=0.0 , A=None , A=False , A=False , A=True , A=True , A=False , A=1_1 , A=1_1 , **A , ) -> Any: _UpperCAmelCase : int = vocab_size # Backward compatibility with n_embed kwarg _UpperCAmelCase : Optional[Any] = kwargs.pop('''n_embed''' , A ) _UpperCAmelCase : int = hidden_size if n_embed is None else n_embed _UpperCAmelCase : List[str] = num_hidden_layers _UpperCAmelCase : Tuple = num_attention_heads _UpperCAmelCase : Optional[int] = layer_norm_epsilon _UpperCAmelCase : Tuple = initializer_range _UpperCAmelCase : Optional[int] = use_cache _UpperCAmelCase : Any = hidden_dropout _UpperCAmelCase : Dict = attention_dropout _UpperCAmelCase : Any = bos_token_id _UpperCAmelCase : List[Any] = eos_token_id _UpperCAmelCase : Tuple = num_attention_heads if num_kv_heads is None else num_kv_heads _UpperCAmelCase : Dict = alibi _UpperCAmelCase : Optional[int] = new_decoder_architecture _UpperCAmelCase : str = multi_query # Ignored when new_decoder_architecture is True _UpperCAmelCase : Optional[int] = parallel_attn _UpperCAmelCase : Optional[int] = bias super().__init__(bos_token_id=A , eos_token_id=A , **A ) @property def __lowerCAmelCase ( self ) -> List[str]: return self.hidden_size // self.num_attention_heads @property def __lowerCAmelCase ( self ) -> List[Any]: return not self.alibi
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple=1 ): if n_shave_prefix_segments >= 0: return ".".join(path.split('''.''' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('''.''' )[:n_shave_prefix_segments] ) def lowerCamelCase_ (UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any=0 ): _UpperCAmelCase : Union[str, Any] = [] for old_item in old_list: _UpperCAmelCase : int = old_item.replace('''in_layers.0''' , '''norm1''' ) _UpperCAmelCase : Dict = new_item.replace('''in_layers.2''' , '''conv1''' ) _UpperCAmelCase : str = new_item.replace('''out_layers.0''' , '''norm2''' ) _UpperCAmelCase : List[Any] = new_item.replace('''out_layers.3''' , '''conv2''' ) _UpperCAmelCase : Tuple = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' ) _UpperCAmelCase : List[str] = new_item.replace('''skip_connection''' , '''conv_shortcut''' ) _UpperCAmelCase : Optional[Any] = shave_segments(UpperCamelCase__ , n_shave_prefix_segments=UpperCamelCase__ ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : List[str]=0 ): _UpperCAmelCase : str = [] for old_item in old_list: _UpperCAmelCase : Any = old_item _UpperCAmelCase : str = new_item.replace('''norm.weight''' , '''group_norm.weight''' ) _UpperCAmelCase : Tuple = new_item.replace('''norm.bias''' , '''group_norm.bias''' ) _UpperCAmelCase : int = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' ) _UpperCAmelCase : Dict = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' ) _UpperCAmelCase : Any = shave_segments(UpperCamelCase__ , n_shave_prefix_segments=UpperCamelCase__ ) mapping.append({'''old''': old_item, '''new''': new_item} ) return mapping def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : Tuple , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Tuple=None ): assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): _UpperCAmelCase : Union[str, Any] = old_checkpoint[path] _UpperCAmelCase : List[Any] = old_tensor.shape[0] // 3 _UpperCAmelCase : Union[str, Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) _UpperCAmelCase : Optional[int] = old_tensor.shape[0] // config['''num_head_channels'''] // 3 _UpperCAmelCase : Tuple = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : str = old_tensor.split(channels // num_heads , dim=1 ) _UpperCAmelCase : Union[str, Any] = query.reshape(UpperCamelCase__ ) _UpperCAmelCase : str = key.reshape(UpperCamelCase__ ) _UpperCAmelCase : List[Any] = value.reshape(UpperCamelCase__ ) for path in paths: _UpperCAmelCase : List[str] = path['''new'''] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here _UpperCAmelCase : Optional[int] = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' ) _UpperCAmelCase : int = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' ) _UpperCAmelCase : List[Any] = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' ) if additional_replacements is not None: for replacement in additional_replacements: _UpperCAmelCase : Union[str, Any] = new_path.replace(replacement['''old'''] , replacement['''new'''] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: _UpperCAmelCase : List[str] = old_checkpoint[path['''old''']][:, :, 0] else: _UpperCAmelCase : Dict = old_checkpoint[path['''old''']] def lowerCamelCase_ (UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] ): _UpperCAmelCase : Dict = {} _UpperCAmelCase : Optional[Any] = checkpoint['''time_embed.0.weight'''] _UpperCAmelCase : Tuple = checkpoint['''time_embed.0.bias'''] _UpperCAmelCase : str = checkpoint['''time_embed.2.weight'''] _UpperCAmelCase : Optional[Any] = checkpoint['''time_embed.2.bias'''] _UpperCAmelCase : Union[str, Any] = checkpoint['''input_blocks.0.0.weight'''] _UpperCAmelCase : str = checkpoint['''input_blocks.0.0.bias'''] _UpperCAmelCase : Optional[int] = checkpoint['''out.0.weight'''] _UpperCAmelCase : Union[str, Any] = checkpoint['''out.0.bias'''] _UpperCAmelCase : int = checkpoint['''out.2.weight'''] _UpperCAmelCase : Dict = checkpoint['''out.2.bias'''] # Retrieves the keys for the input blocks only _UpperCAmelCase : Optional[Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} ) _UpperCAmelCase : str = { layer_id: [key for key in checkpoint if F'input_blocks.{layer_id}' in key] for layer_id in range(UpperCamelCase__ ) } # Retrieves the keys for the middle blocks only _UpperCAmelCase : Union[str, Any] = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} ) _UpperCAmelCase : Optional[Any] = { layer_id: [key for key in checkpoint if F'middle_block.{layer_id}' in key] for layer_id in range(UpperCamelCase__ ) } # Retrieves the keys for the output blocks only _UpperCAmelCase : int = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} ) _UpperCAmelCase : Optional[Any] = { layer_id: [key for key in checkpoint if F'output_blocks.{layer_id}' in key] for layer_id in range(UpperCamelCase__ ) } for i in range(1 , UpperCamelCase__ ): 
_UpperCAmelCase : Dict = (i - 1) // (config['''num_res_blocks'''] + 1) _UpperCAmelCase : str = (i - 1) % (config['''num_res_blocks'''] + 1) _UpperCAmelCase : Dict = [key for key in input_blocks[i] if F'input_blocks.{i}.0' in key] _UpperCAmelCase : str = [key for key in input_blocks[i] if F'input_blocks.{i}.1' in key] if F'input_blocks.{i}.0.op.weight' in checkpoint: _UpperCAmelCase : Union[str, Any] = checkpoint[ F'input_blocks.{i}.0.op.weight' ] _UpperCAmelCase : int = checkpoint[ F'input_blocks.{i}.0.op.bias' ] continue _UpperCAmelCase : Optional[Any] = renew_resnet_paths(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = {'''old''': F'input_blocks.{i}.0', '''new''': F'down_blocks.{block_id}.resnets.{layer_in_block_id}'} _UpperCAmelCase : List[Any] = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''} assign_to_checkpoint( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path, resnet_op] , config=UpperCamelCase__ ) if len(UpperCamelCase__ ): _UpperCAmelCase : int = renew_attention_paths(UpperCamelCase__ ) _UpperCAmelCase : List[str] = { '''old''': F'input_blocks.{i}.1', '''new''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}', } _UpperCAmelCase : Tuple = { F'input_blocks.{i}.1.qkv.bias': { '''key''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', '''query''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', '''value''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, F'input_blocks.{i}.1.qkv.weight': { '''key''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', '''query''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', '''value''': F'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=UpperCamelCase__ , config=UpperCamelCase__ , ) _UpperCAmelCase : Optional[int] = middle_blocks[0] _UpperCAmelCase : Any = middle_blocks[1] _UpperCAmelCase : Union[str, Any] = middle_blocks[2] _UpperCAmelCase : Tuple = renew_resnet_paths(UpperCamelCase__ ) assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , config=UpperCamelCase__ ) _UpperCAmelCase : str = renew_resnet_paths(UpperCamelCase__ ) assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , config=UpperCamelCase__ ) _UpperCAmelCase : List[Any] = renew_attention_paths(UpperCamelCase__ ) _UpperCAmelCase : Tuple = { '''middle_block.1.qkv.bias''': { '''key''': '''mid_block.attentions.0.key.bias''', '''query''': '''mid_block.attentions.0.query.bias''', '''value''': '''mid_block.attentions.0.value.bias''', }, '''middle_block.1.qkv.weight''': { '''key''': '''mid_block.attentions.0.key.weight''', '''query''': '''mid_block.attentions.0.query.weight''', '''value''': '''mid_block.attentions.0.value.weight''', }, } assign_to_checkpoint( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , attention_paths_to_split=UpperCamelCase__ , config=UpperCamelCase__ ) for i in range(UpperCamelCase__ ): _UpperCAmelCase : Union[str, Any] = i // (config['''num_res_blocks'''] + 1) _UpperCAmelCase : str = i % (config['''num_res_blocks'''] + 1) _UpperCAmelCase : Optional[Any] = [shave_segments(UpperCamelCase__ , 2 ) for name in output_blocks[i]] _UpperCAmelCase : Optional[Any] = {} for layer in output_block_layers: _UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = layer.split('''.''' )[0], 
shave_segments(UpperCamelCase__ , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(UpperCamelCase__ ) else: _UpperCAmelCase : List[Any] = [layer_name] if len(UpperCamelCase__ ) > 1: _UpperCAmelCase : Dict = [key for key in output_blocks[i] if F'output_blocks.{i}.0' in key] _UpperCAmelCase : Union[str, Any] = [key for key in output_blocks[i] if F'output_blocks.{i}.1' in key] _UpperCAmelCase : Optional[int] = renew_resnet_paths(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = renew_resnet_paths(UpperCamelCase__ ) _UpperCAmelCase : Optional[Any] = {'''old''': F'output_blocks.{i}.0', '''new''': F'up_blocks.{block_id}.resnets.{layer_in_block_id}'} assign_to_checkpoint(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , config=UpperCamelCase__ ) if ["conv.weight", "conv.bias"] in output_block_list.values(): _UpperCAmelCase : Dict = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] ) _UpperCAmelCase : Optional[int] = checkpoint[ F'output_blocks.{i}.{index}.conv.weight' ] _UpperCAmelCase : int = checkpoint[ F'output_blocks.{i}.{index}.conv.bias' ] # Clear attentions as they have been attributed above. if len(UpperCamelCase__ ) == 2: _UpperCAmelCase : int = [] if len(UpperCamelCase__ ): _UpperCAmelCase : Dict = renew_attention_paths(UpperCamelCase__ ) _UpperCAmelCase : Any = { '''old''': F'output_blocks.{i}.1', '''new''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}', } _UpperCAmelCase : str = { F'output_blocks.{i}.1.qkv.bias': { '''key''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias', '''query''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias', '''value''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias', }, F'output_blocks.{i}.1.qkv.weight': { '''key''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight', '''query''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight', '''value''': F'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight', }, } assign_to_checkpoint( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=UpperCamelCase__ , ) else: _UpperCAmelCase : List[Any] = renew_resnet_paths(UpperCamelCase__ , n_shave_prefix_segments=1 ) for path in resnet_0_paths: _UpperCAmelCase : str = '''.'''.join(['''output_blocks''', str(UpperCamelCase__ ), path['''old''']] ) _UpperCAmelCase : Dict = '''.'''.join(['''up_blocks''', str(UpperCamelCase__ ), '''resnets''', str(UpperCamelCase__ ), path['''new''']] ) _UpperCAmelCase : Tuple = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": _lowerCAmelCase :Optional[int] = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' 
) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the architecture.', ) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') _lowerCAmelCase :Optional[Any] = parser.parse_args() _lowerCAmelCase :Optional[int] = torch.load(args.checkpoint_path) with open(args.config_file) as f: _lowerCAmelCase :List[Any] = json.loads(f.read()) _lowerCAmelCase :Optional[Any] = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] _lowerCAmelCase :Union[str, Any] = UNetaDModel(**config) model.load_state_dict(converted_checkpoint) try: _lowerCAmelCase :Dict = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1])) _lowerCAmelCase :List[str] = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1])) _lowerCAmelCase :Union[str, Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME _lowerCAmelCase :int = ['small', 'medium', 'large'] _lowerCAmelCase :int = 'lm_head.decoder.weight' _lowerCAmelCase :Dict = 'lm_head.weight' def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str ): _UpperCAmelCase : List[Any] = torch.load(UpperCamelCase__ ) _UpperCAmelCase : List[str] = d.pop(UpperCamelCase__ ) os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ ) torch.save(UpperCamelCase__ , os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": _lowerCAmelCase :Dict = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) _lowerCAmelCase :str = parser.parse_args() for MODEL in DIALOGPT_MODELS: _lowerCAmelCase :Tuple = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl") _lowerCAmelCase :int = f"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
"""simple docstring""" from __future__ import annotations from fractions import Fraction def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int ): return ( num != den and num % 10 == den // 10 and (num // 10) / (den % 10) == num / den ) def lowerCamelCase_ (UpperCamelCase__ : int ): _UpperCAmelCase : Dict = [] _UpperCAmelCase : Any = 11 _UpperCAmelCase : Optional[int] = int('''1''' + '''0''' * digit_len ) for num in range(UpperCamelCase__ , UpperCamelCase__ ): while den <= 99: if (num != den) and (num % 10 == den // 10) and (den % 10 != 0): if is_digit_cancelling(UpperCamelCase__ , UpperCamelCase__ ): solutions.append(F'{num}/{den}' ) den += 1 num += 1 _UpperCAmelCase : Union[str, Any] = 10 return solutions def lowerCamelCase_ (UpperCamelCase__ : int = 2 ): _UpperCAmelCase : Dict = 1.0 for fraction in fraction_list(UpperCamelCase__ ): _UpperCAmelCase : List[str] = Fraction(UpperCamelCase__ ) result *= frac.denominator / frac.numerator return int(UpperCamelCase__ ) if __name__ == "__main__": print(solution())
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _lowerCAmelCase :Tuple = tuple[int, int] class _UpperCAmelCase : '''simple docstring''' def __init__( self , A , A ) -> None: _UpperCAmelCase : set[int] = vertices _UpperCAmelCase : dict[EdgeT, int] = { (min(A ), max(A )): weight for edge, weight in edges.items() } def __lowerCAmelCase ( self , A , A ) -> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) _UpperCAmelCase : List[Any] = weight def __lowerCAmelCase ( self ) -> Graph: _UpperCAmelCase : Graph = Graph({min(self.vertices )} , {} ) _UpperCAmelCase : EdgeT _UpperCAmelCase : int _UpperCAmelCase : EdgeT _UpperCAmelCase : int while len(subgraph.vertices ) < len(self.vertices ): _UpperCAmelCase : Any = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: _UpperCAmelCase : Tuple = edge _UpperCAmelCase : Optional[int] = weight subgraph.add_edge(A , A ) return subgraph def lowerCamelCase_ (UpperCamelCase__ : str = "p107_network.txt" ): _UpperCAmelCase : str = os.path.abspath(os.path.dirname(UpperCamelCase__ ) ) _UpperCAmelCase : str = os.path.join(UpperCamelCase__ , UpperCamelCase__ ) _UpperCAmelCase : dict[EdgeT, int] = {} _UpperCAmelCase : list[str] _UpperCAmelCase : int _UpperCAmelCase : int with open(UpperCamelCase__ ) as f: _UpperCAmelCase : str = f.read().strip().split('''\n''' ) _UpperCAmelCase : List[Any] = [line.split(''',''' ) for line in data] for edgea in range(1 , len(UpperCamelCase__ ) ): for edgea in range(UpperCamelCase__ ): if adjaceny_matrix[edgea][edgea] != "-": _UpperCAmelCase : Optional[Any] = int(adjaceny_matrix[edgea][edgea] ) _UpperCAmelCase : Graph = Graph(set(range(len(UpperCamelCase__ ) ) ) , UpperCamelCase__ ) _UpperCAmelCase : Graph = graph.prims_algorithm() _UpperCAmelCase : int = sum(graph.edges.values() ) _UpperCAmelCase : int = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"{solution() = }")
"""simple docstring""" from collections.abc import Callable def lowerCamelCase_ (UpperCamelCase__ : Callable[[float], float] , UpperCamelCase__ : float , UpperCamelCase__ : float ): _UpperCAmelCase : float = a _UpperCAmelCase : float = b if function(UpperCamelCase__ ) == 0: # one of the a or b is a root for the function return a elif function(UpperCamelCase__ ) == 0: return b elif ( function(UpperCamelCase__ ) * function(UpperCamelCase__ ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: _UpperCAmelCase : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(UpperCamelCase__ ) == 0: return mid elif function(UpperCamelCase__ ) * function(UpperCamelCase__ ) < 0: _UpperCAmelCase : Optional[Any] = mid else: _UpperCAmelCase : Optional[Any] = mid _UpperCAmelCase : str = start + (end - start) / 2.0 return mid def lowerCamelCase_ (UpperCamelCase__ : float ): return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_000)) import doctest doctest.testmod()
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase :int = logging.get_logger(__name__) _lowerCAmelCase :Union[str, Any] = { 'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json', } class _UpperCAmelCase ( a ): '''simple docstring''' a__ ='''mgp-str''' def __init__( self , A=[3_2, 1_2_8] , A=4 , A=3 , A=2_7 , A=3_8 , A=5_0_2_5_7 , A=3_0_5_2_2 , A=7_6_8 , A=1_2 , A=1_2 , A=4.0 , A=True , A=False , A=1E-5 , A=0.0 , A=0.0 , A=0.0 , A=False , A=0.02 , **A , ) -> Union[str, Any]: super().__init__(**A ) _UpperCAmelCase : Any = image_size _UpperCAmelCase : str = patch_size _UpperCAmelCase : Dict = num_channels _UpperCAmelCase : Dict = max_token_length _UpperCAmelCase : Optional[Any] = num_character_labels _UpperCAmelCase : int = num_bpe_labels _UpperCAmelCase : List[str] = num_wordpiece_labels _UpperCAmelCase : Optional[int] = hidden_size _UpperCAmelCase : Any = num_hidden_layers _UpperCAmelCase : List[Any] = num_attention_heads _UpperCAmelCase : List[Any] = mlp_ratio _UpperCAmelCase : List[str] = distilled _UpperCAmelCase : Optional[int] = layer_norm_eps _UpperCAmelCase : str = drop_rate _UpperCAmelCase : List[Any] = qkv_bias _UpperCAmelCase : List[str] = attn_drop_rate _UpperCAmelCase : Dict = drop_path_rate _UpperCAmelCase : Union[str, Any] = output_aa_attentions _UpperCAmelCase : List[str] = initializer_range
"""simple docstring""" import unittest from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin _lowerCAmelCase :Tuple = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class _UpperCAmelCase ( a ,unittest.TestCase ): '''simple docstring''' a__ =XLNetTokenizer a__ =XLNetTokenizerFast a__ =True a__ =True def __lowerCAmelCase ( self ) -> int: super().setUp() # We have a SentencePiece fixture for testing _UpperCAmelCase : List[Any] = XLNetTokenizer(A , keep_accents=A ) tokenizer.sanitize_special_tokens() tokenizer.save_pretrained(self.tmpdirname ) def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : List[str] = '''<s>''' _UpperCAmelCase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def __lowerCAmelCase ( self ) -> Optional[int]: _UpperCAmelCase : Dict = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '''<unk>''' ) self.assertEqual(vocab_keys[1] , '''<s>''' ) self.assertEqual(vocab_keys[-1] , '''<eod>''' ) self.assertEqual(len(A ) , 1_0_0_6 ) def __lowerCAmelCase ( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_0 ) def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : str = XLNetTokenizer(A , keep_accents=A ) _UpperCAmelCase : Tuple = tokenizer.tokenize('''This is a test''' ) self.assertListEqual(A , ['''▁This''', '''▁is''', '''▁a''', '''▁t''', '''est'''] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2] ) _UpperCAmelCase : Optional[Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''é''', '''.''', ] , ) _UpperCAmelCase : int = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 0, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 0, 4] ) _UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''<unk>''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''s''', '''<unk>''', '''.''', ] , ) def __lowerCAmelCase ( self ) -> List[Any]: _UpperCAmelCase : Any = XLNetTokenizer(A , do_lower_case=A ) _UpperCAmelCase : Dict = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( A , [ SPIECE_UNDERLINE + '''''', '''i''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) 
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) , ['''▁he''', '''ll''', '''o'''] ) def __lowerCAmelCase ( self ) -> Optional[Any]: _UpperCAmelCase : int = XLNetTokenizer(A , do_lower_case=A ) _UpperCAmelCase : Union[str, Any] = tokenizer.tokenize('''I was born in 92000, and this is falsé.''' ) self.assertListEqual( A , [ SPIECE_UNDERLINE + '''I''', SPIECE_UNDERLINE + '''was''', SPIECE_UNDERLINE + '''b''', '''or''', '''n''', SPIECE_UNDERLINE + '''in''', SPIECE_UNDERLINE + '''''', '''9''', '''2''', '''0''', '''0''', '''0''', ''',''', SPIECE_UNDERLINE + '''and''', SPIECE_UNDERLINE + '''this''', SPIECE_UNDERLINE + '''is''', SPIECE_UNDERLINE + '''f''', '''al''', '''se''', '''.''', ] , ) @slow def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[Any] = XLNetTokenizer.from_pretrained('''xlnet-base-cased''' ) _UpperCAmelCase : Any = tokenizer.encode('''sequence builders''' , add_special_tokens=A ) _UpperCAmelCase : List[Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=A ) _UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A ) _UpperCAmelCase : Optional[Any] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == text + [4, 3] assert encoded_pair == text + [4] + text_a + [4, 3] @slow def __lowerCAmelCase ( self ) -> str: # fmt: off _UpperCAmelCase : Union[str, Any] = {'''input_ids''': [[1_7, 2_1_4_4_2, 2_7_0, 1_7, 1_0, 1_4_6_4_5, 3_1_8, 3_4, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 7_7_5_2, 2_2_0_1_8, 2_3, 2_1, 1_7, 4_5_4_6, 3_1_4_5, 7_8_7, 1_3, 3_3_5_2, 1_4_4_3_1, 1_3, 5_5_0_0, 1_1, 1_1_7_6, 5_8_0, 1_3, 1_6_8_1_9, 4_7_9_7, 2_3, 1_7, 1_0, 1_7_1_3_5, 6_5_8, 1_9, 4_5_7, 7_9_3_2, 1_3, 1_8_4, 1_9, 3_1_5_4, 1_7_1_3_5, 6_4_6_8, 1_9, 1_4_0_4, 1_2_2_6_9, 1_9, 4_2_2_9, 5_3_5_6, 1_6_2_6_4, 4_6, 1_9, 1_7, 2_0_5_4_5, 1_0_3_9_5, 9, 9, 9, 1_1, 2_8, 6_4_2_1, 9_5_3_1, 2_0_7_2_9, 1_7, 1_0, 3_5_3, 1_7_0_2_2, 1_1, 2_1, 6_4_2_1, 9_5_3_1, 1_6_9_4_9, 1_7, 1_0, 1_1_5_0_9, 7_5_3, 1_1, 3_3, 9_5, 2_4_2_1, 7_3_8_5, 9_5_6, 1_4_4_3_1, 2_6_2_6, 2_5, 8_4_2, 7_3_8_5, 4_8_3_6, 2_1, 1_4_2_9, 2_2_7_2, 9_8_5_5, 3_1_2_0, 1_6_1, 2_4_7_3_8, 1_9, 1_3_2_0_3, 6_5_8, 2_1_8, 7_8_7, 2_1, 4_3_0, 1_8_4_8_2, 8_4_7, 2_6_3_7, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2_2, 2_2_1_7_8, 2_7, 1_0_6_4, 2_2, 9_5_6, 1_3, 1_1_1_0_1, 1_4_2_9, 5_8_5_4, 2_4_3_1_3, 1_8_9_5_3, 4_0, 4_2_2, 2_4_3_6_6, 6_8, 1_7_5_8, 3_7, 1_0_4_8_3, 1_4_2_5_7, 3_1, 2_0_7, 2_6_3, 2_1, 2_0_3, 3_7_7_3, 2_5, 7_1, 9_7_3_5, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3_2, 2_0_4_9, 3_4_4_2, 1_7, 1_3_8_9_4, 3_3_8_0, 2_3, 9_5, 1_8, 1_7_6_3_4, 2_2_8_8, 9, 4, 3]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=A , model_name='''xlnet-base-cased''' , revision='''c841166438c31ec7ca9a106dee7bb312b73ae511''' , )
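# A minimal usage sketch for the tokenizer exercised above, assuming only what the
# tests themselves assert: the '''xlnet-base-cased''' checkpoint and its <sep>=4 /
# <cls>=3 special-token ids. The printed tail is illustrative, not a new fixture.
from transformers import XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
encoded_sentence = tokenizer.encode("sequence builders", add_special_tokens=True)
# XLNet places its special tokens at the end of the sequence, so the last two ids
# are <sep> (4) and <cls> (3), exactly as the sequence-builders test checks.
print(encoded_sentence[-2:])  # -> [4, 3]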
263
"""simple docstring""" from __future__ import annotations import math def lowerCamelCase_ (UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : list[int] , UpperCamelCase__ : float ): if depth < 0: raise ValueError('''Depth cannot be less than 0''' ) if len(UpperCamelCase__ ) == 0: raise ValueError('''Scores cannot be empty''' ) if depth == height: return scores[node_index] if is_max: return max( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) return min( minimax(depth + 1 , node_index * 2 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , minimax(depth + 1 , node_index * 2 + 1 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , ) def lowerCamelCase_ (): _UpperCAmelCase : Any = [90, 23, 6, 33, 21, 65, 123, 3_4423] _UpperCAmelCase : Any = math.log(len(UpperCamelCase__ ) , 2 ) print('''Optimal value : ''' , end='''''' ) print(minimax(0 , 0 , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
263
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase_ (UpperCamelCase__ : list[int] , UpperCamelCase__ : list[int] , UpperCamelCase__ : int ): _UpperCAmelCase : Dict = list(range(len(UpperCamelCase__ ) ) ) _UpperCAmelCase : Optional[int] = [v / w for v, w in zip(UpperCamelCase__ , UpperCamelCase__ )] index.sort(key=lambda UpperCamelCase__ : ratio[i] , reverse=UpperCamelCase__ ) _UpperCAmelCase : float = 0 _UpperCAmelCase : list[float] = [0] * len(UpperCamelCase__ ) for i in index: if weight[i] <= capacity: _UpperCAmelCase : Optional[Any] = 1 max_value += value[i] capacity -= weight[i] else: _UpperCAmelCase : Optional[int] = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
263
"""simple docstring""" import gc import tempfile import unittest import numpy as np import torch from diffusers import VersatileDiffusionPipeline from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device _lowerCAmelCase :Optional[Any] = False class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' pass @nightly @require_torch_gpu class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' def __lowerCAmelCase ( self ) -> List[Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Dict: _UpperCAmelCase : Tuple = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : List[str] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(A ) _UpperCAmelCase : int = VersatileDiffusionPipeline.from_pretrained(A , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = generator.manual_seed(0 ) _UpperCAmelCase : Union[str, Any] = pipe.dual_guided( prompt='''first prompt''' , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=2 , output_type='''numpy''' , ).images assert np.abs(image - new_image ).sum() < 1E-5, "Models don't have the same forward pass" def __lowerCAmelCase ( self ) -> List[str]: _UpperCAmelCase : List[Any] = VersatileDiffusionPipeline.from_pretrained('''shi-labs/versatile-diffusion''' , torch_dtype=torch.floataa ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : int = '''cyberpunk 2077''' _UpperCAmelCase : Optional[int] = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg''' ) _UpperCAmelCase : str = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.dual_guided( prompt=A , image=A , text_to_image_strength=0.75 , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' , ).images _UpperCAmelCase : Union[str, Any] = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[Any] = np.array([0.1_448, 0.1_619, 0.1_741, 0.1_086, 0.1_147, 0.1_128, 0.1_199, 0.1_165, 0.1_001] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : Dict = '''A painting of a squirrel eating a burger ''' _UpperCAmelCase : Tuple = torch.manual_seed(0 ) _UpperCAmelCase : Optional[Any] = pipe.text_to_image( prompt=A , generator=A , guidance_scale=7.5 , num_inference_steps=5_0 , output_type='''numpy''' ).images _UpperCAmelCase : Tuple = image[0, 2_5_3:2_5_6, 2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : int = np.array([0.3_367, 0.3_169, 0.2_656, 0.3_870, 0.4_790, 0.3_796, 0.4_009, 0.4_878, 0.4_778] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1 _UpperCAmelCase : int = pipe.image_variation(A , generator=A , output_type='''numpy''' ).images _UpperCAmelCase : Optional[int] = image[0, 2_5_3:2_5_6, 
2_5_3:2_5_6, -1] assert image.shape == (1, 5_1_2, 5_1_2, 3) _UpperCAmelCase : List[str] = np.array([0.3_076, 0.3_123, 0.3_284, 0.3_782, 0.3_770, 0.3_894, 0.4_297, 0.4_331, 0.4_456] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
263
1
"""simple docstring""" import os from distutils.util import strtobool def lowerCamelCase_ (UpperCamelCase__ : Tuple , UpperCamelCase__ : Dict ): for e in env_keys: _UpperCAmelCase : str = int(os.environ.get(UpperCamelCase__ , -1 ) ) if val >= 0: return val return default def lowerCamelCase_ (UpperCamelCase__ : str , UpperCamelCase__ : str=False ): _UpperCAmelCase : int = os.environ.get(UpperCamelCase__ , str(UpperCamelCase__ ) ) return strtobool(UpperCamelCase__ ) == 1 # As its name indicates `strtobool` actually returns an int... def lowerCamelCase_ (UpperCamelCase__ : Any , UpperCamelCase__ : Dict="no" ): _UpperCAmelCase : int = os.environ.get(UpperCamelCase__ , str(UpperCamelCase__ ) ) return value
263
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, StableDiffusionAttendAndExcitePipeline, UNetaDConditionModel, ) from diffusers.utils import load_numpy, skip_mps, slow from diffusers.utils.testing_utils import require_torch_gpu from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin _lowerCAmelCase :Any = False @skip_mps class _UpperCAmelCase ( a ,a ,a ,unittest.TestCase ): '''simple docstring''' a__ =StableDiffusionAttendAndExcitePipeline a__ =False a__ =TEXT_TO_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} ) a__ =TEXT_TO_IMAGE_IMAGE_PARAMS a__ =TEXT_TO_IMAGE_IMAGE_PARAMS @classmethod def __lowerCAmelCase ( cls ) -> List[str]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> Tuple: torch.manual_seed(0 ) _UpperCAmelCase : Optional[int] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , attention_head_dim=(2, 4) , use_linear_projection=A , ) _UpperCAmelCase : List[Any] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=A , set_alpha_to_one=A , ) torch.manual_seed(0 ) _UpperCAmelCase : int = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_2_8 , ) torch.manual_seed(0 ) _UpperCAmelCase : int = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='''gelu''' , projection_dim=5_1_2 , ) _UpperCAmelCase : List[str] = CLIPTextModel(A ) _UpperCAmelCase : str = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCAmelCase : Union[str, Any] = { '''unet''': unet, '''scheduler''': scheduler, '''vae''': vae, '''text_encoder''': text_encoder, '''tokenizer''': tokenizer, '''safety_checker''': None, '''feature_extractor''': None, } return components def __lowerCAmelCase ( self , A , A=0 ) -> List[Any]: if str(A ).startswith('''mps''' ): _UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: _UpperCAmelCase : Union[str, Any] = torch.Generator(device=A ).manual_seed(A ) _UpperCAmelCase : List[str] = { '''prompt''': '''a cat and a frog''', '''token_indices''': [2, 5], '''generator''': generator, '''num_inference_steps''': 1, '''guidance_scale''': 6.0, '''output_type''': '''numpy''', '''max_iter_to_alter''': 2, '''thresholds''': {0: 0.7}, } return inputs def __lowerCAmelCase ( self ) -> int: _UpperCAmelCase : List[str] = '''cpu''' _UpperCAmelCase : Tuple = self.get_dummy_components() _UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) _UpperCAmelCase : Dict = self.get_dummy_inputs(A 
) _UpperCAmelCase : Union[str, Any] = pipe(**A ).images _UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 6_4, 6_4, 3) ) _UpperCAmelCase : int = np.array( [0.63_905_364, 0.62_897_307, 0.48_599_017, 0.5_133_624, 0.5_550_048, 0.45_769_516, 0.50_326_973, 0.5_023_139, 0.45_384_496] ) _UpperCAmelCase : Tuple = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1E-3 ) def __lowerCAmelCase ( self ) -> Dict: super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> List[str]: # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCAmelCase ( self ) -> Union[str, Any]: self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) def __lowerCAmelCase ( self ) -> List[str]: super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 ) def __lowerCAmelCase ( self ) -> str: super().test_save_load_local(expected_max_difference=5E-4 ) def __lowerCAmelCase ( self ) -> Optional[int]: super().test_save_load_optional_components(expected_max_difference=4E-4 ) @require_torch_gpu @slow class _UpperCAmelCase ( unittest.TestCase ): '''simple docstring''' @classmethod def __lowerCAmelCase ( cls ) -> Union[str, Any]: super().setUpClass() torch.use_deterministic_algorithms(A ) @classmethod def __lowerCAmelCase ( cls ) -> Optional[int]: super().tearDownClass() torch.use_deterministic_algorithms(A ) def __lowerCAmelCase ( self ) -> List[str]: super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> str: _UpperCAmelCase : Any = torch.manual_seed(5_1 ) _UpperCAmelCase : Optional[Any] = StableDiffusionAttendAndExcitePipeline.from_pretrained( '''CompVis/stable-diffusion-v1-4''' , safety_checker=A , torch_dtype=torch.floataa ) pipe.to('''cuda''' ) _UpperCAmelCase : Optional[int] = '''a painting of an elephant with glasses''' _UpperCAmelCase : int = [5, 7] _UpperCAmelCase : Dict = pipe( prompt=A , token_indices=A , guidance_scale=7.5 , generator=A , num_inference_steps=5 , max_iter_to_alter=5 , output_type='''numpy''' , ).images[0] _UpperCAmelCase : List[Any] = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy''' ) assert np.abs((expected_image - image).max() ) < 5E-1
263
1