Dataset schema (one record has five fields):
- code: string, 87 to 55.2k characters
- code_codestyle: int64, 0 to 349
- style_context: string, 135 to 49.1k characters
- style_context_codestyle: int64, 0 to 349
- label: int64, 0 or 1
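This dump does not name the dataset repository, so the following is only a minimal sketch of how rows with this schema could be loaded and inspected, assuming the data is hosted on the Hugging Face Hub; the repo id is a hypothetical placeholder.

```python
# Minimal sketch for loading and inspecting a dataset with the schema above.
# "user/obfuscated-codestyle-pairs" is a placeholder -- the dump does not name the dataset.
from datasets import load_dataset

ds = load_dataset("user/obfuscated-codestyle-pairs", split="train")  # hypothetical repo id

row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # each code field is an entire source file stored as one string
```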
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ : Any = logging.get_logger(__name__) lowerCamelCase_ : List[str] = { """asapp/sew-d-tiny-100k""": """https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json""", # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class __A ( _SCREAMING_SNAKE_CASE ): """simple docstring""" __lowerCAmelCase = "sew-d" def __init__( self , __A=32 , __A=768 , __A=12 , __A=12 , __A=3072 , __A=2 , __A=512 , __A=256 , __A=True , __A=True , __A=("p2c", "c2p") , __A="layer_norm" , __A="gelu_python" , __A=0.1 , __A=0.1 , __A=0.1 , __A=0.0 , __A=0.1 , __A=0.02 , __A=1E-7 , __A=1E-5 , __A="group" , __A="gelu" , __A=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , __A=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , __A=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , __A=False , __A=128 , __A=16 , __A=True , __A=0.05 , __A=10 , __A=2 , __A=0.0 , __A=10 , __A=0 , __A="mean" , __A=False , __A=False , __A=256 , __A=0 , __A=1 , __A=2 , **__A , ) -> Optional[Any]: super().__init__(**__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A ) a =hidden_size a =feat_extract_norm a =feat_extract_activation a =list(__A ) a =list(__A ) a =list(__A ) a =conv_bias a =num_conv_pos_embeddings a =num_conv_pos_embedding_groups a =len(self.conv_dim ) a =num_hidden_layers a =intermediate_size a =squeeze_factor a =max_position_embeddings a =position_buckets a =share_att_key a =relative_attention a =norm_rel_ebd a =list(__A ) a =hidden_act a =num_attention_heads a =hidden_dropout a =attention_dropout a =activation_dropout a =feat_proj_dropout a =final_dropout a =layer_norm_eps a =feature_layer_norm_eps a =initializer_range a =vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 a =apply_spec_augment a =mask_time_prob a =mask_time_length a =mask_time_min_masks a =mask_feature_prob a =mask_feature_length a =mask_feature_min_masks # ctc loss a =ctc_loss_reduction a =ctc_zero_infinity # sequence classification a =use_weighted_layer_sum a =classifier_proj_size @property def SCREAMING_SNAKE_CASE ( self ) -> Dict: return functools.reduce(operator.mul , self.conv_stride , 1 )
81
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
265
0
Row 2 · code (code_codestyle: 82)
```python
from .integrations import (
    is_optuna_available,
    is_ray_available,
    is_sigopt_available,
    is_wandb_available,
    run_hp_search_optuna,
    run_hp_search_ray,
    run_hp_search_sigopt,
    run_hp_search_wandb,
)
from .trainer_utils import (
    HPSearchBackend,
    default_hp_space_optuna,
    default_hp_space_ray,
    default_hp_space_sigopt,
    default_hp_space_wandb,
)
from .utils import logging

A__ = logging.get_logger(__name__)

class __lowerCAmelCase:
    __lowerCamelCase = 42
    __lowerCamelCase = None

    @staticmethod
    def snake_case():
        """simple docstring"""
        raise NotImplementedError

    def snake_case(self, _snake_case, _snake_case, _snake_case, **_snake_case):
        """simple docstring"""
        raise NotImplementedError

    def snake_case(self, _snake_case):
        """simple docstring"""
        raise NotImplementedError

    def snake_case(self):
        """simple docstring"""
        if not self.is_available():
            raise RuntimeError(
                F'You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.'
            )

    @classmethod
    def snake_case(cls):
        """simple docstring"""
        return F'`pip install {cls.pip_package or cls.name}`'

class __lowerCAmelCase(lowerCamelCase__):
    __lowerCamelCase = '''optuna'''

    @staticmethod
    def snake_case():
        """simple docstring"""
        return is_optuna_available()

    def snake_case(self, _snake_case, _snake_case, _snake_case, **_snake_case):
        """simple docstring"""
        return run_hp_search_optuna(_snake_case, _snake_case, _snake_case, **_snake_case)

    def snake_case(self, _snake_case):
        """simple docstring"""
        return default_hp_space_optuna(_snake_case)

class __lowerCAmelCase(lowerCamelCase__):
    __lowerCamelCase = '''ray'''
    __lowerCamelCase = '''\'ray[tune]\''''

    @staticmethod
    def snake_case():
        """simple docstring"""
        return is_ray_available()

    def snake_case(self, _snake_case, _snake_case, _snake_case, **_snake_case):
        """simple docstring"""
        return run_hp_search_ray(_snake_case, _snake_case, _snake_case, **_snake_case)

    def snake_case(self, _snake_case):
        """simple docstring"""
        return default_hp_space_ray(_snake_case)

class __lowerCAmelCase(lowerCamelCase__):
    __lowerCamelCase = '''sigopt'''

    @staticmethod
    def snake_case():
        """simple docstring"""
        return is_sigopt_available()

    def snake_case(self, _snake_case, _snake_case, _snake_case, **_snake_case):
        """simple docstring"""
        return run_hp_search_sigopt(_snake_case, _snake_case, _snake_case, **_snake_case)

    def snake_case(self, _snake_case):
        """simple docstring"""
        return default_hp_space_sigopt(_snake_case)

class __lowerCAmelCase(lowerCamelCase__):
    __lowerCamelCase = '''wandb'''

    @staticmethod
    def snake_case():
        """simple docstring"""
        return is_wandb_available()

    def snake_case(self, _snake_case, _snake_case, _snake_case, **_snake_case):
        """simple docstring"""
        return run_hp_search_wandb(_snake_case, _snake_case, _snake_case, **_snake_case)

    def snake_case(self, _snake_case):
        """simple docstring"""
        return default_hp_space_wandb(_snake_case)

A__ = {
    HPSearchBackend(backend.name): backend
    for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}

def _UpperCAmelCase():
    """simple docstring"""
    _lowerCAmelCase = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
    if len(snake_case) > 0:
        _lowerCAmelCase = available_backends[0].name
        if len(snake_case) > 1:
            logger.info(
                F'{len(snake_case)} hyperparameter search backends available. Using {name} as the default.'
            )
        return name
    raise RuntimeError(
        """No hyperparameter search backend available.\n"""
        + """\n""".join(
            F' - To install {backend.name} run {backend.pip_install()}'
            for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values()
        )
    )
```
Row 2 · style_context (style_context_codestyle: 265)
```python
'''simple docstring'''
from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask

if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST

class UpperCamelCase_:
    def __init__(
        self, A, A=12, A=7, A=True, A=True, A=True, A=99, A=32, A=32, A=2,
        A=4, A=37, A=0.1, A=0.1, A=512, A=0.0_2, A=0, A=None,
    ) -> Any:
        UpperCAmelCase : Optional[Any] = parent
        UpperCAmelCase : str = batch_size
        UpperCAmelCase : Union[str, Any] = seq_length
        UpperCAmelCase : Optional[Any] = is_training
        UpperCAmelCase : int = use_input_mask
        UpperCAmelCase : List[Any] = use_labels
        UpperCAmelCase : Dict = vocab_size
        UpperCAmelCase : str = hidden_size
        UpperCAmelCase : List[Any] = projection_dim
        UpperCAmelCase : Tuple = num_hidden_layers
        UpperCAmelCase : Dict = num_attention_heads
        UpperCAmelCase : Optional[Any] = intermediate_size
        UpperCAmelCase : Any = dropout
        UpperCAmelCase : List[Any] = attention_dropout
        UpperCAmelCase : Optional[Any] = max_position_embeddings
        UpperCAmelCase : Tuple = initializer_range
        UpperCAmelCase : Optional[Any] = scope
        UpperCAmelCase : Union[str, Any] = bos_token_id

    def _lowercase(self) -> Tuple:
        UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        UpperCAmelCase : Union[str, Any] = None
        if self.use_input_mask:
            UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length])
        if input_mask is not None:
            UpperCAmelCase : Tuple = input_mask.numpy()
            UpperCAmelCase, UpperCAmelCase : int = input_mask.shape
            UpperCAmelCase : Optional[int] = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(A):
                UpperCAmelCase : Tuple = 1
                UpperCAmelCase : Optional[Any] = 0
        UpperCAmelCase : int = self.get_config()
        return config, input_ids, tf.convert_to_tensor(A)

    def _lowercase(self) -> int:
        return BlipTextConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, dropout=self.dropout,
            attention_dropout=self.attention_dropout, max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range, bos_token_id=self.bos_token_id,
        )

    def _lowercase(self, A, A, A) -> Union[str, Any]:
        UpperCAmelCase : int = TFBlipTextModel(config=A)
        UpperCAmelCase : Union[str, Any] = model(A, attention_mask=A, training=A)
        UpperCAmelCase : int = model(A, training=A)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def _lowercase(self) -> Optional[int]:
        UpperCAmelCase : Dict = self.prepare_config_and_inputs()
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase : Any = config_and_inputs
        UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict

@require_tf
class UpperCamelCase_(__magic_name__, unittest.TestCase):
    lowercase = (TFBlipTextModel,) if is_tf_available() else ()
    lowercase = False
    lowercase = False
    lowercase = False

    def _lowercase(self) -> int:
        UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self)
        UpperCAmelCase : List[str] = ConfigTester(self, config_class=A, hidden_size=37)

    def _lowercase(self) -> Tuple:
        self.config_tester.run_common_tests()

    def _lowercase(self) -> List[Any]:
        UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*A)

    def _lowercase(self) -> List[str]:
        pass

    def _lowercase(self) -> Optional[int]:
        pass

    @unittest.skip(reason="""Blip does not use inputs_embeds""")
    def _lowercase(self) -> Union[str, Any]:
        pass

    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
    def _lowercase(self) -> Optional[int]:
        pass

    @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""")
    def _lowercase(self) -> Dict:
        pass

    @slow
    def _lowercase(self) -> Dict:
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A)
            self.assertIsNotNone(A)

    def _lowercase(self, A=True) -> str:
        super().test_pt_tf_model_equivalence(allow_missing_keys=A)
```
Row 2 · label: 0
Row 3 · code (code_codestyle: 83)
```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

snake_case_ : Optional[Any] = {
    'configuration_swinv2': ['SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP', 'Swinv2Config'],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    snake_case_ : int = [
        'SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST',
        'Swinv2ForImageClassification',
        'Swinv2ForMaskedImageModeling',
        'Swinv2Model',
        'Swinv2PreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_swinva import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinvaConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinva import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwinvaForImageClassification,
            SwinvaForMaskedImageModeling,
            SwinvaModel,
            SwinvaPreTrainedModel,
        )
else:
    import sys

    snake_case_ : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
```
Row 3 · style_context (style_context_codestyle: 265)
```python
'''simple docstring'''
import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available

if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification

a : str = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

a : int = """main"""
# Default branch name

a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"""
# One particular commit (not the top of `main`)

a : str = """aaaaaaa"""
# This commit does not exist, so we should 404.

a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"""
# Sha-1 of config.json on the top of `main`, for checking purposes

a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"""

@contextlib.contextmanager
def __lowerCamelCase() -> List[str]:
    print("""Welcome!""")
    yield
    print("""Bye!""")

@contextlib.contextmanager
def __lowerCamelCase() -> Optional[int]:
    print("""Bonjour!""")
    yield
    print("""Au revoir!""")

class UpperCamelCase_(unittest.TestCase):
    def _lowercase(self) -> List[Any]:
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("""transformers""") is not None

class UpperCamelCase_(unittest.TestCase):
    @unittest.mock.patch("""sys.stdout""", new_callable=io.StringIO)
    def _lowercase(self, A) -> Tuple:
        with ContextManagers([]):
            print("""Transformers are awesome!""")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), """Transformers are awesome!\n""")

    @unittest.mock.patch("""sys.stdout""", new_callable=io.StringIO)
    def _lowercase(self, A) -> Dict:
        with ContextManagers([context_en()]):
            print("""Transformers are awesome!""")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), """Welcome!\nTransformers are awesome!\nBye!\n""")

    @unittest.mock.patch("""sys.stdout""", new_callable=io.StringIO)
    def _lowercase(self, A) -> Union[str, Any]:
        with ContextManagers([context_fr(), context_en()]):
            print("""Transformers are awesome!""")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""")

    @require_torch
    def _lowercase(self) -> Optional[int]:
        self.assertEqual(find_labels(A), ["""labels"""])
        self.assertEqual(find_labels(A), ["""labels""", """next_sentence_label"""])
        self.assertEqual(find_labels(A), ["""start_positions""", """end_positions"""])

        class UpperCamelCase_(__magic_name__):
            pass

        self.assertEqual(find_labels(A), ["""labels"""])

    @require_tf
    def _lowercase(self) -> int:
        self.assertEqual(find_labels(A), ["""labels"""])
        self.assertEqual(find_labels(A), ["""labels""", """next_sentence_label"""])
        self.assertEqual(find_labels(A), ["""start_positions""", """end_positions"""])

        class UpperCamelCase_(__magic_name__):
            pass

        self.assertEqual(find_labels(A), ["""labels"""])

    @require_flax
    def _lowercase(self) -> Any:
        # Flax models don't have labels
        self.assertEqual(find_labels(A), [])
        self.assertEqual(find_labels(A), [])
        self.assertEqual(find_labels(A), [])

        class UpperCamelCase_(__magic_name__):
            pass

        self.assertEqual(find_labels(A), [])
```
Row 3 · label: 0
"""simple docstring""" def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Dict ) -> Dict: '''simple docstring''' lowerCAmelCase_ :Any = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def _snake_case ( lowercase__ : Optional[Any] , lowercase__ : Optional[Any] , lowercase__ : str ) -> Optional[Any]: '''simple docstring''' lowerCAmelCase_ :Tuple = 0 while b > 0: if b & 1: lowerCAmelCase_ :str = ((res % c) + (a % c)) % c a += a b >>= 1 return res
84
'''simple docstring''' from itertools import count def __lowerCamelCase ( _lowercase = 5_0 ) -> int: UpperCAmelCase : Any = [1] * min_block_length for n in count(_lowercase ): fill_count_functions.append(1 ) for block_length in range(_lowercase , n + 1 ): for block_start in range(n - block_length ): fill_count_functions[n] += fill_count_functions[ n - block_start - block_length - 1 ] fill_count_functions[n] += 1 if fill_count_functions[n] > 1_0_0_0_0_0_0: break return n if __name__ == "__main__": print(F'''{solution() = }''')
265
0
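Row 4's `code` field is multiplication by repeated doubling (the double-and-add, or Russian peasant, method), plus a modular variant that keeps the running sum reduced. A de-obfuscated sketch of the same two loops, with names of my own choosing:

```python
def multiply(a: int, b: int) -> int:
    """Multiply a by b using the double-and-add (Russian peasant) method."""
    res = 0
    while b > 0:
        if b & 1:   # add the current power-of-two multiple when the bit is set
            res += a
        a += a      # double a for the next bit of b
        b >>= 1     # move to the next bit
    return res

def multiply_mod(a: int, b: int, c: int) -> int:
    """Same double-and-add loop, keeping the running sum reduced modulo c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res

print(multiply(13, 7))        # 91
print(multiply_mod(13, 7, 5)) # 1
```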
Row 5 · code (code_codestyle: 85)
```python
'''simple docstring'''
import io
import json
import unittest

from parameterized import parameterized

from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu

_SCREAMING_SNAKE_CASE : int = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    _SCREAMING_SNAKE_CASE : List[Any] = json.load(f)

@require_torch
class _snake_case(unittest.TestCase):
    def lowerCAmelCase__(self, a__) -> Dict:
        '''simple docstring'''
        return FSMTTokenizer.from_pretrained(a__)

    def lowerCAmelCase__(self, a__) -> Tuple:
        '''simple docstring'''
        snake_case_ = FSMTForConditionalGeneration.from_pretrained(a__).to(a__)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 2_6.0],
            ["ru-en", 2_2.0],
            ["en-de", 2_2.0],
            ["de-en", 2_9.0],
        ]
    )
    @slow
    def lowerCAmelCase__(self, a__, a__) -> Optional[int]:
        '''simple docstring'''
        snake_case_ = F'facebook/wmt19-{pair}'
        snake_case_ = self.get_tokenizer(a__)
        snake_case_ = self.get_model(a__)
        snake_case_ = bleu_data[pair]["src"]
        snake_case_ = bleu_data[pair]["tgt"]
        snake_case_ = tokenizer(a__, return_tensors="pt", truncation=a__, padding="longest").to(a__)
        snake_case_ = model.generate(input_ids=batch.input_ids, num_beams=8)
        snake_case_ = tokenizer.batch_decode(a__, skip_special_tokens=a__, clean_up_tokenization_spaces=a__)
        snake_case_ = calculate_bleu(a__, a__)
        print(a__)
        self.assertGreaterEqual(scores["bleu"], a__)
```
Row 5 · style_context (style_context_codestyle: 265)
```python
'''simple docstring'''
from __future__ import annotations

import math

class UpperCamelCase_:
    def __init__(self, A) -> None:
        UpperCAmelCase : Optional[int] = size
        # approximate the overall size of segment tree with given value
        UpperCAmelCase : Optional[int] = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        UpperCAmelCase : Any = [0 for i in range(0, 4 * size)]
        UpperCAmelCase : Tuple = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def _lowercase(self, A) -> int:
        return idx * 2

    def _lowercase(self, A) -> int:
        return idx * 2 + 1

    def _lowercase(self, A, A, A, A) -> None:
        if left_element == right_element:
            UpperCAmelCase : str = a[left_element - 1]
        else:
            UpperCAmelCase : Tuple = (left_element + right_element) // 2
            self.build(self.left(A), A, A, A)
            self.build(self.right(A), mid + 1, A, A)
            UpperCAmelCase : str = max(
                self.segment_tree[self.left(A)], self.segment_tree[self.right(A)]
            )

    def _lowercase(self, A, A, A, A, A, A) -> bool:
        if self.flag[idx] is True:
            UpperCAmelCase : Optional[Any] = self.lazy[idx]
            UpperCAmelCase : int = False
            if left_element != right_element:
                UpperCAmelCase : List[str] = self.lazy[idx]
                UpperCAmelCase : Optional[Any] = self.lazy[idx]
                UpperCAmelCase : List[str] = True
                UpperCAmelCase : int = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            UpperCAmelCase : Optional[Any] = val
            if left_element != right_element:
                UpperCAmelCase : Tuple = val
                UpperCAmelCase : int = val
                UpperCAmelCase : Any = True
                UpperCAmelCase : str = True
            return True
        UpperCAmelCase : str = (left_element + right_element) // 2
        self.update(self.left(A), A, A, A, A, A)
        self.update(self.right(A), mid + 1, A, A, A, A)
        UpperCAmelCase : List[str] = max(
            self.segment_tree[self.left(A)], self.segment_tree[self.right(A)]
        )
        return True

    def _lowercase(self, A, A, A, A, A) -> int | float:
        if self.flag[idx] is True:
            UpperCAmelCase : Any = self.lazy[idx]
            UpperCAmelCase : Any = False
            if left_element != right_element:
                UpperCAmelCase : Optional[Any] = self.lazy[idx]
                UpperCAmelCase : Tuple = self.lazy[idx]
                UpperCAmelCase : List[str] = True
                UpperCAmelCase : Tuple = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        UpperCAmelCase : Dict = (left_element + right_element) // 2
        UpperCAmelCase : List[Any] = self.query(self.left(A), A, A, A, A)
        UpperCAmelCase : str = self.query(self.right(A), mid + 1, A, A, A)
        return max(A, A)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, A, A) for i in range(1, self.size + 1)])

if __name__ == "__main__":
    a : Optional[int] = [1, 2, -4, 7, 3, -5, 6, 1_1, -2_0, 9, 1_4, 1_5, 5, 2, -8]
    a : Optional[Any] = 1_5
    a : Union[str, Any] = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 1_1))
    print(segt.query(1, 1, size, 7, 1_2))
    segt.update(1, 1, size, 1, 3, 1_1_1)
    print(segt.query(1, 1, size, 1, 1_5))
    segt.update(1, 1, size, 7, 8, 2_3_5)
    print(segt)
```
Row 5 · label: 0
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Any = [1] __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = 0, 0, 0 __lowerCAmelCase : List[str] = ugly_nums[ia] * 2 __lowerCAmelCase : Optional[Any] = ugly_nums[ia] * 3 __lowerCAmelCase : Any = ugly_nums[ia] * 5 for _ in range(1 , _UpperCamelCase ): __lowerCAmelCase : int = min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ugly_nums.append(_UpperCamelCase ) if next_num == next_a: ia += 1 __lowerCAmelCase : Optional[Any] = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __lowerCAmelCase : Any = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __lowerCAmelCase : int = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'{ugly_numbers(200) = }')
86
'''simple docstring''' from PIL import Image def __lowerCamelCase ( _lowercase , _lowercase ) -> Image: def brightness(_lowercase ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(_lowercase ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change brightness to 100 a : Optional[Any] = change_brightness(img, 1_0_0) brigt_img.save("""image_data/lena_brightness.png""", format="""png""")
265
0
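Row 6's `code` field is the classic three-pointer generator for ugly numbers (positive integers whose only prime factors are 2, 3, and 5). A de-obfuscated sketch of the same algorithm, with names of my own choosing:

```python
def ugly_numbers(n: int) -> int:
    """Return the n-th ugly number (only prime factors 2, 3, 5), 1-indexed."""
    ugly = [1]
    i2 = i3 = i5 = 0                 # one pointer into `ugly` per prime multiplier
    next2, next3, next5 = 2, 3, 5    # next candidate multiple for each prime
    for _ in range(1, n):
        nxt = min(next2, next3, next5)
        ugly.append(nxt)
        # advance every pointer that produced this value (avoids duplicates like 6 = 2*3 = 3*2)
        if nxt == next2:
            i2 += 1
            next2 = ugly[i2] * 2
        if nxt == next3:
            i3 += 1
            next3 = ugly[i3] * 3
        if nxt == next5:
            i5 += 1
            next5 = ugly[i5] * 5
    return ugly[-1]

print(ugly_numbers(10))  # 12 (sequence: 1, 2, 3, 4, 5, 6, 8, 9, 10, 12)
```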
Row 7 · code (code_codestyle: 87)
```python
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available

UpperCamelCase = {
    '''configuration_luke''': ['''LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LukeConfig'''],
    '''tokenization_luke''': ['''LukeTokenizer'''],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''LUKE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''LukeForEntityClassification''',
        '''LukeForEntityPairClassification''',
        '''LukeForEntitySpanClassification''',
        '''LukeForMultipleChoice''',
        '''LukeForQuestionAnswering''',
        '''LukeForSequenceClassification''',
        '''LukeForTokenClassification''',
        '''LukeForMaskedLM''',
        '''LukeModel''',
        '''LukePreTrainedModel''',
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
```
Row 7 · style_context (style_context_codestyle: 265)
```python
'''simple docstring'''
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )

class UpperCamelCase_:
    def __init__(
        self, A, A=13, A=7, A=True, A=True, A=True, A=True, A=99, A=64, A=5, A=4,
        A=37, A="gelu", A=0.1, A=0.1, A=512, A=16, A=2, A=0.0_2, A=3, A=4, A=None,
    ) -> Optional[int]:
        UpperCAmelCase : List[Any] = parent
        UpperCAmelCase : Optional[int] = batch_size
        UpperCAmelCase : Union[str, Any] = seq_length
        UpperCAmelCase : Optional[Any] = is_training
        UpperCAmelCase : Dict = use_input_mask
        UpperCAmelCase : str = use_token_type_ids
        UpperCAmelCase : List[Any] = use_labels
        UpperCAmelCase : List[Any] = vocab_size
        UpperCAmelCase : Dict = hidden_size
        UpperCAmelCase : Dict = num_hidden_layers
        UpperCAmelCase : Optional[int] = num_attention_heads
        UpperCAmelCase : int = intermediate_size
        UpperCAmelCase : List[str] = hidden_act
        UpperCAmelCase : List[str] = hidden_dropout_prob
        UpperCAmelCase : int = attention_probs_dropout_prob
        UpperCAmelCase : str = max_position_embeddings
        UpperCAmelCase : Optional[Any] = type_vocab_size
        UpperCAmelCase : List[str] = type_sequence_label_size
        UpperCAmelCase : int = initializer_range
        UpperCAmelCase : str = num_labels
        UpperCAmelCase : Optional[int] = num_choices
        UpperCAmelCase : Dict = scope
        UpperCAmelCase : Union[str, Any] = vocab_size - 1

    def _lowercase(self) -> Union[str, Any]:
        UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        UpperCAmelCase : Any = None
        if self.use_input_mask:
            UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length])
        UpperCAmelCase : List[str] = None
        if self.use_labels:
            UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        UpperCAmelCase : Optional[int] = self.get_config()
        return config, input_ids, input_mask, token_labels

    def _lowercase(self) -> Optional[Any]:
        return GPTNeoXConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size, hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            is_decoder=A, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id,
        )

    def _lowercase(self) -> Optional[Any]:
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase : List[str] = self.prepare_config_and_inputs()
        UpperCAmelCase : Any = True
        return config, input_ids, input_mask, token_labels

    def _lowercase(self, A, A, A) -> int:
        UpperCAmelCase : str = GPTNeoXModel(config=A)
        model.to(A)
        model.eval()
        UpperCAmelCase : List[str] = model(A, attention_mask=A)
        UpperCAmelCase : List[str] = model(A)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def _lowercase(self, A, A, A) -> Optional[int]:
        UpperCAmelCase : str = True
        UpperCAmelCase : Optional[Any] = GPTNeoXModel(A)
        model.to(A)
        model.eval()
        UpperCAmelCase : List[Any] = model(A, attention_mask=A)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def _lowercase(self, A, A, A, A) -> List[str]:
        UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A)
        model.to(A)
        model.eval()
        UpperCAmelCase : str = model(A, attention_mask=A, labels=A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def _lowercase(self, A, A, A, A) -> Tuple:
        UpperCAmelCase : List[str] = self.num_labels
        UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A)
        model.to(A)
        model.eval()
        UpperCAmelCase : str = model(A, attention_mask=A)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def _lowercase(self, A, A, A, A) -> int:
        UpperCAmelCase : Tuple = self.num_labels
        UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A)
        model.to(A)
        model.eval()
        UpperCAmelCase : List[Any] = ids_tensor([self.batch_size], self.type_sequence_label_size)
        UpperCAmelCase : Tuple = model(A, attention_mask=A, labels=A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def _lowercase(self, A, A, A, A) -> str:
        UpperCAmelCase : List[Any] = self.num_labels
        UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A)
        model.to(A)
        model.eval()
        UpperCAmelCase : int = model(A, attention_mask=A, labels=A)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def _lowercase(self, A, A, A) -> Union[str, Any]:
        UpperCAmelCase : Optional[int] = True
        UpperCAmelCase : str = GPTNeoXForCausalLM(config=A)
        model.to(A)
        model.eval()

        # first forward pass
        UpperCAmelCase : List[str] = model(A, attention_mask=A, use_cache=A)
        UpperCAmelCase : List[Any] = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3), config.vocab_size)
        UpperCAmelCase : Any = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        UpperCAmelCase : str = torch.cat([input_ids, next_tokens], dim=-1)
        UpperCAmelCase : Any = torch.cat([input_mask, next_mask], dim=-1)

        UpperCAmelCase : Dict = model(A, attention_mask=A, output_hidden_states=A)
        UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0]
        UpperCAmelCase : List[str] = model(
            A, attention_mask=A, past_key_values=A, output_hidden_states=A,
        )["""hidden_states"""][0]

        # select random slice
        UpperCAmelCase : Tuple = ids_tensor((1,), output_from_past.shape[-1]).item()
        UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
        UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(A, A, atol=1e-3))

    def _lowercase(self) -> int:
        UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase : Optional[Any] = config_and_inputs
        UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict

@require_torch
class UpperCamelCase_(__magic_name__, __magic_name__, __magic_name__, unittest.TestCase):
    lowercase = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    lowercase = (
        {
            'feature-extraction': GPTNeoXModel,
            'question-answering': GPTNeoXForQuestionAnswering,
            'text-classification': GPTNeoXForSequenceClassification,
            'text-generation': GPTNeoXForCausalLM,
            'token-classification': GPTNeoXForTokenClassification,
            'zero-shot': GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def _lowercase(self) -> Union[str, Any]:
        UpperCAmelCase : str = GPTNeoXModelTester(self)
        UpperCAmelCase : Optional[Any] = ConfigTester(self, config_class=A, hidden_size=64, num_attention_heads=8)

    def _lowercase(self) -> Optional[Any]:
        self.config_tester.run_common_tests()

    def _lowercase(self) -> str:
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(A, A, A)

    def _lowercase(self) -> str:
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(A, A, A)

    def _lowercase(self) -> Optional[Any]:
        # This regression test was failing with PyTorch < 1.3
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        UpperCAmelCase : Optional[Any] = None
        self.model_tester.create_and_check_model_as_decoder(A, A, A)

    def _lowercase(self) -> str:
        UpperCAmelCase, UpperCAmelCase, UpperCAmelCase, UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(A, A, A)

    def _lowercase(self) -> int:
        UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*A)

    def _lowercase(self) -> Optional[int]:
        UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*A)

    def _lowercase(self) -> Any:
        UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*A)

    def _lowercase(self) -> Optional[Any]:
        UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*A)

    @unittest.skip(reason="""Feed forward chunking is not implemented""")
    def _lowercase(self) -> Optional[int]:
        pass

    @parameterized.expand([("""linear""",), ("""dynamic""",)])
    def _lowercase(self, A) -> str:
        UpperCAmelCase, UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCAmelCase : int = ids_tensor([1, 10], config.vocab_size)
        UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5)], config.vocab_size)

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        UpperCAmelCase : Dict = GPTNeoXModel(A)
        original_model.to(A)
        original_model.eval()
        UpperCAmelCase : List[str] = original_model(A).last_hidden_state
        UpperCAmelCase : Any = original_model(A).last_hidden_state

        set_seed(42)  # Fixed seed at init time so the two models get the same random weights
        UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0}
        UpperCAmelCase : str = GPTNeoXModel(A)
        scaled_model.to(A)
        scaled_model.eval()
        UpperCAmelCase : Optional[Any] = scaled_model(A).last_hidden_state
        UpperCAmelCase : Optional[Any] = scaled_model(A).last_hidden_state

        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(A, A, atol=1e-5))
        else:
            self.assertFalse(torch.allclose(A, A, atol=1e-5))

        # The output should be different for long inputs
        self.assertFalse(torch.allclose(A, A, atol=1e-5))

@require_torch
class UpperCamelCase_(unittest.TestCase):
    @slow
    def _lowercase(self) -> List[Any]:
        UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""")
        for checkpointing in [True, False]:
            UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""")
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(A)
            UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""", return_tensors="""pt""").to(A)
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
            UpperCAmelCase : Union[str, Any] = model.generate(**A, do_sample=A, max_new_tokens=20)
            UpperCAmelCase : Tuple = tokenizer.batch_decode(A)[0]
            self.assertEqual(A, A)
```
Row 7 · label: 0
Row 8 · code (code_codestyle: 88)
```python
from typing import Optional

import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin

class UpperCAmelCase_(_A, _A, _A):
    '''simple docstring'''

    a__ = [R"""h\.\d+\.attn\.bias""", R"""h\.\d+\.attn\.masked_bias"""]

    @register_to_config
    def __init__(
        self: str,
        UpperCamelCase__: int,
        UpperCamelCase__: int,
        UpperCamelCase__: Optional[int] = None,
        UpperCamelCase__: int = 5_0257,
        UpperCamelCase__: int = 1024,
        UpperCamelCase__: int = 768,
        UpperCamelCase__: int = 12,
        UpperCamelCase__: int = 12,
        UpperCamelCase__: Optional[int] = None,
        UpperCamelCase__: str = "gelu_new",
        UpperCamelCase__: float = 0.1,
        UpperCamelCase__: float = 0.1,
        UpperCamelCase__: float = 0.1,
        UpperCamelCase__: float = 1E-5,
        UpperCamelCase__: float = 0.02,
        UpperCamelCase__: bool = True,
        UpperCamelCase__: bool = True,
        UpperCamelCase__: bool = False,
        UpperCamelCase__: bool = False,
    ) -> Tuple:
        """simple docstring"""
        super().__init__()
        __magic_name__ = prefix_length
        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
                f''' `n_embd`: {n_embd} are not equal.'''
            )
        __magic_name__ = prefix_inner_dim
        __magic_name__ = prefix_hidden_dim
        __magic_name__ = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        __magic_name__ = (
            nn.Linear(self.prefix_hidden_dim, UpperCamelCase__)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        __magic_name__ = GPTaConfig(
            vocab_size=UpperCamelCase__, n_positions=UpperCamelCase__, n_embd=UpperCamelCase__,
            n_layer=UpperCamelCase__, n_head=UpperCamelCase__, n_inner=UpperCamelCase__,
            activation_function=UpperCamelCase__, resid_pdrop=UpperCamelCase__, embd_pdrop=UpperCamelCase__,
            attn_pdrop=UpperCamelCase__, layer_norm_epsilon=UpperCamelCase__, initializer_range=UpperCamelCase__,
            scale_attn_weights=UpperCamelCase__, use_cache=UpperCamelCase__,
            scale_attn_by_inverse_layer_idx=UpperCamelCase__, reorder_and_upcast_attn=UpperCamelCase__,
        )
        __magic_name__ = GPTaLMHeadModel(UpperCamelCase__)

    def _lowercase(
        self: Tuple,
        UpperCamelCase__: torch.Tensor,
        UpperCamelCase__: torch.Tensor,
        UpperCamelCase__: Optional[torch.Tensor] = None,
        UpperCamelCase__: Optional[torch.Tensor] = None,
    ) -> Union[str, Any]:
        """simple docstring"""
        __magic_name__ = self.transformer.transformer.wte(UpperCamelCase__)
        __magic_name__ = self.encode_prefix(UpperCamelCase__)
        __magic_name__ = self.decode_prefix(UpperCamelCase__)
        __magic_name__ = torch.cat((prefix_embeds, embedding_text), dim=1)
        if labels is not None:
            __magic_name__ = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            __magic_name__ = torch.cat((dummy_token, input_ids), dim=1)
        __magic_name__ = self.transformer(inputs_embeds=UpperCamelCase__, labels=UpperCamelCase__, attention_mask=UpperCamelCase__)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        else:
            return out

    def _lowercase(self: Dict, UpperCamelCase__: int, UpperCamelCase__: torch.device) -> torch.Tensor:
        """simple docstring"""
        return torch.zeros(UpperCamelCase__, self.prefix_length, dtype=torch.intaa, device=UpperCamelCase__)

    def _lowercase(self: str, UpperCamelCase__: Any) -> int:
        """simple docstring"""
        return self.encode_prefix(UpperCamelCase__)

    @torch.no_grad()
    def _lowercase(self: int, UpperCamelCase__: Union[str, Any], UpperCamelCase__: Dict, UpperCamelCase__: Any) -> Any:
        """simple docstring"""
        __magic_name__ = torch.split(UpperCamelCase__, 1, dim=0)
        __magic_name__ = []
        __magic_name__ = []
        for feature in features:
            __magic_name__ = self.decode_prefix(feature.to(UpperCamelCase__))  # back to the clip feature
            # Only support beam search for now
            __magic_name__, __magic_name__ = self.generate_beam(
                input_embeds=UpperCamelCase__, device=UpperCamelCase__, eos_token_id=UpperCamelCase__
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        __magic_name__ = torch.stack(UpperCamelCase__)
        __magic_name__ = torch.stack(UpperCamelCase__)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def _lowercase(
        self: List[Any],
        UpperCamelCase__: List[Any] = None,
        UpperCamelCase__: Union[str, Any] = None,
        UpperCamelCase__: List[Any] = None,
        UpperCamelCase__: int = 5,
        UpperCamelCase__: int = 67,
        UpperCamelCase__: float = 1.0,
        UpperCamelCase__: Optional[int] = None,
    ) -> Optional[int]:
        """simple docstring"""
        __magic_name__ = eos_token_id
        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = torch.ones(UpperCamelCase__, device=UpperCamelCase__, dtype=torch.int)
        __magic_name__ = torch.zeros(UpperCamelCase__, device=UpperCamelCase__, dtype=torch.bool)
        if input_embeds is not None:
            __magic_name__ = input_embeds
        else:
            __magic_name__ = self.transformer.transformer.wte(UpperCamelCase__)
        for i in range(UpperCamelCase__):
            __magic_name__ = self.transformer(inputs_embeds=UpperCamelCase__)
            __magic_name__ = outputs.logits
            __magic_name__ = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            __magic_name__ = logits.softmax(-1).log()
            if scores is None:
                __magic_name__, __magic_name__ = logits.topk(UpperCamelCase__, -1)
                __magic_name__ = generated.expand(UpperCamelCase__, *generated.shape[1:])
                __magic_name__, __magic_name__ = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    __magic_name__ = next_tokens
                else:
                    __magic_name__ = tokens.expand(UpperCamelCase__, *tokens.shape[1:])
                    __magic_name__ = torch.cat((tokens, next_tokens), dim=1)
            else:
                __magic_name__ = -float(np.inf)
                __magic_name__ = 0
                __magic_name__ = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                __magic_name__ = scores_sum / seq_lengths[:, None]
                __magic_name__, __magic_name__ = scores_sum_average.view(-1).topk(UpperCamelCase__, -1)
                __magic_name__ = next_tokens // scores_sum.shape[1]
                __magic_name__ = seq_lengths[next_tokens_source]
                __magic_name__ = next_tokens % scores_sum.shape[1]
                __magic_name__ = next_tokens.unsqueeze(1)
                __magic_name__ = tokens[next_tokens_source]
                __magic_name__ = torch.cat((tokens, next_tokens), dim=1)
                __magic_name__ = generated[next_tokens_source]
                __magic_name__ = scores_sum_average * seq_lengths
                __magic_name__ = is_stopped[next_tokens_source]
            __magic_name__ = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            __magic_name__ = torch.cat((generated, next_token_embed), dim=1)
            __magic_name__ = is_stopped + next_tokens.eq(UpperCamelCase__).squeeze()
            if is_stopped.all():
                break
        __magic_name__ = scores / seq_lengths
        __magic_name__ = scores.argsort(descending=UpperCamelCase__)
        # tokens tensors are already padded to max_seq_length
        __magic_name__ = [tokens[i] for i in order]
        __magic_name__ = torch.stack(UpperCamelCase__, dim=0)
        __magic_name__ = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
```
Row 8 · style_context (style_context_codestyle: 265)
```python
'''simple docstring'''

def __lowerCamelCase(_lowercase, _lowercase) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(_lowercase, int(b / 2)) * actual_power(_lowercase, int(b / 2))
    else:
        return a * actual_power(_lowercase, int(b / 2)) * actual_power(_lowercase, int(b / 2))

def __lowerCamelCase(_lowercase, _lowercase) -> float:
    if b < 0:
        return 1 / actual_power(_lowercase, _lowercase)
    return actual_power(_lowercase, _lowercase)

if __name__ == "__main__":
    print(power(-2, -3))
```
Row 8 · label: 0
'''simple docstring''' from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCAmelCase = logging.get_logger(__name__) class __magic_name__ ( _UpperCamelCase ): lowerCAmelCase : str = ['audio_values', 'audio_mask'] def __init__( self : Tuple ,_UpperCAmelCase : str=2048 ,_UpperCAmelCase : List[str]=1 ,_UpperCAmelCase : Any=[16, 16] ,_UpperCAmelCase : Dict=128 ,_UpperCAmelCase : Tuple=44100 ,_UpperCAmelCase : Optional[Any]=86 ,_UpperCAmelCase : str=2048 ,_UpperCAmelCase : Union[str, Any]=0.0 ,**_UpperCAmelCase : List[str] ,): super().__init__( feature_size=_UpperCAmelCase ,sampling_rate=_UpperCAmelCase ,padding_value=_UpperCAmelCase ,**_UpperCAmelCase ,) _a : List[Any] = spectrogram_length _a : Tuple = num_channels _a : Dict = patch_size _a : Optional[Any] = feature_size // self.patch_size[1] _a : Tuple = n_fft _a : List[Any] = sampling_rate // hop_length_to_sampling_rate _a : List[str] = sampling_rate _a : Union[str, Any] = padding_value _a : Any = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=_UpperCAmelCase ,min_frequency=0.0 ,max_frequency=2_20_50.0 ,sampling_rate=_UpperCAmelCase ,norm='slaney' ,mel_scale='slaney' ,).T def __lowercase ( self : Tuple ,_UpperCAmelCase : np.array ): _a : List[str] = spectrogram( _UpperCAmelCase ,window_function(self.n_fft ,'hann' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters.T ,log_mel='dB' ,db_range=80.0 ,) _a : List[Any] = log_spec[:, :-1] _a : Optional[Any] = log_spec - 20.0 _a : Dict = np.clip(log_spec / 40.0 ,-2.0 ,0.0 ) + 1.0 return log_spec def __call__( self : int ,_UpperCAmelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_UpperCAmelCase : Optional[Union[str, TensorType]] = None ,_UpperCAmelCase : Optional[bool] = True ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : bool = False ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Any ,): if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( 'This feature extractor is set to support sampling rate' F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" F""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) _a : Optional[Any] = isinstance(_UpperCAmelCase ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) _a : Optional[int] = is_batched_numpy or ( isinstance(_UpperCAmelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: _a : Union[str, Any] = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(_UpperCAmelCase ,np.ndarray ): _a : Optional[int] = np.asarray(_UpperCAmelCase ,dtype=np.floataa ) elif isinstance(_UpperCAmelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): _a : Optional[Any] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: _a : Dict = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis _a : Any = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] ,_UpperCAmelCase ): _a : Tuple = [np.asarray(_UpperCAmelCase ,dtype=np.floataa ) for feature in audio_features] # Create audio attention mask _a : List[Any] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: _a : Dict = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] _a : Union[str, Any] = np.array(_UpperCAmelCase ).astype(np.floataa ) # convert into correct format for padding _a : Dict = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch _a : Union[str, Any] = np.ones([len(_UpperCAmelCase ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) _a : List[Any] = padded_audio_features * self.padding_value for i in range(len(_UpperCAmelCase ) ): _a : Any = audio_features[i] _a : List[Any] = feature # return as BatchFeature if return_attention_mask: _a : Tuple = {'audio_values': padded_audio_features, 'audio_mask': audio_mask} else: _a : Tuple = {'audio_values': padded_audio_features} _a : Optional[Any] = BatchFeature(data=_UpperCAmelCase ,tensor_type=_UpperCAmelCase ) return encoded_inputs
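# Illustrative usage sketch (not part of the original file); `__magic_name__`
# is the feature-extractor class defined above, the keyword names follow the
# names read inside __call__, and the waveform is synthetic.
# fe = __magic_name__()                                   # defaults: 44.1 kHz, 128 mel bins
# waveform = np.random.randn(44_100).astype(np.float32)   # one second of noise
# batch = fe(waveform, sampling_rate=44_100)              # BatchFeature with "audio_values"
# batch["audio_values"].shape                             # -> (1, 1, max_time_len, 128)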
89
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
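# Illustrative round trip (not part of the test class above); `a` is the
# SentencePiece fixture path defined at module scope.
# tok = AlbertTokenizer(a)
# ids = tok.encode("this is a test")
# tok.decode(ids, skip_special_tokens=True)  # -> "this is a test"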
265
0
import json import os from pathlib import Path from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple, Union import sentencepiece from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __A = logging.get_logger(__name__) __A = "▁" __A = { "vocab_file": "vocab.json", "spm_file": "sentencepiece.bpe.model", } __A = { "vocab_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json" ), }, "spm_file": { "facebook/s2t-small-librispeech-asr": ( "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model" ) }, } __A = { "facebook/s2t-small-librispeech-asr": 10_24, } __A = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"] __A = {"mustc": MUSTC_LANGS} class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" snake_case_ = VOCAB_FILES_NAMES snake_case_ = PRETRAINED_VOCAB_FILES_MAP snake_case_ = MAX_MODEL_INPUT_SIZES snake_case_ = ['''input_ids''', '''attention_mask'''] snake_case_ = [] def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<unk>" , lowerCamelCase__=False , lowerCamelCase__=False , lowerCamelCase__=None , lowerCamelCase__=None , lowerCamelCase__ = None , **lowerCamelCase__ , ) -> None: '''simple docstring''' __lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , do_upper_case=lowerCamelCase__ , do_lower_case=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , lang_codes=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , ) __lowerCamelCase = do_upper_case __lowerCamelCase = do_lower_case __lowerCamelCase = load_json(lowerCamelCase__ ) __lowerCamelCase = {v: k for k, v in self.encoder.items()} __lowerCamelCase = spm_file __lowerCamelCase = load_spm(lowerCamelCase__ , self.sp_model_kwargs ) if lang_codes is not None: __lowerCamelCase = lang_codes __lowerCamelCase = LANGUAGES[lang_codes] __lowerCamelCase = [f"""<lang:{lang}>""" for lang in self.langs] __lowerCamelCase = {lang: self.sp_model.PieceToId(f"""<lang:{lang}>""" ) for lang in self.langs} __lowerCamelCase = self.lang_tokens __lowerCamelCase = tgt_lang if tgt_lang is not None else self.langs[0] self.set_tgt_lang_special_tokens(self._tgt_lang ) else: __lowerCamelCase = {} @property def lowercase_ ( self ) -> int: '''simple docstring''' return len(self.encoder ) @property def lowercase_ ( self ) -> str: '''simple docstring''' return self._tgt_lang @tgt_lang.setter def lowercase_ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = new_tgt_lang self.set_tgt_lang_special_tokens(lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = self.lang_code_to_id[tgt_lang] __lowerCamelCase = [lang_code_id] def lowercase_ ( self , lowerCamelCase__ ) -> List[str]: '''simple docstring''' return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ ) def lowercase_ ( self , lowerCamelCase__ ) -> Tuple: '''simple docstring''' return self.encoder.get(lowerCamelCase__ , self.encoder[self.unk_token] ) def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' return self.decoder.get(lowerCamelCase__ , self.unk_token ) def lowercase_ ( self , lowerCamelCase__ ) -> str: '''simple docstring''' __lowerCamelCase = [] 
__lowerCamelCase = '' for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: __lowerCamelCase = self.sp_model.decode(lowerCamelCase__ ) out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " " __lowerCamelCase = [] else: current_sub_tokens.append(lowerCamelCase__ ) __lowerCamelCase = self.sp_model.decode(lowerCamelCase__ ) out_string += decoded.upper() if self.do_upper_case else decoded return out_string.strip() def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + [self.eos_token_id] def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) __lowerCamelCase = [1] * len(self.prefix_tokens ) __lowerCamelCase = [1] if token_ids_a is None: return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones def lowercase_ ( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.encoder.copy() vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self ) -> Dict: '''simple docstring''' __lowerCamelCase = self.__dict__.copy() __lowerCamelCase = None return state def __setstate__( self , lowerCamelCase__ ) -> None: '''simple docstring''' __lowerCamelCase = d # for backward compatibility if not hasattr(self , 'sp_model_kwargs' ): __lowerCamelCase = {} __lowerCamelCase = load_spm(self.spm_file , self.sp_model_kwargs ) def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple[str]: '''simple docstring''' __lowerCamelCase = Path(lowerCamelCase__ ) assert save_dir.is_dir(), f"""{save_directory} should be a directory""" __lowerCamelCase = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file'] ) __lowerCamelCase = save_dir / ( (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file'] ) save_json(self.encoder , lowerCamelCase__ ) if os.path.abspath(self.spm_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.spm_file ): copyfile(self.spm_file , lowerCamelCase__ ) elif not os.path.isfile(self.spm_file ): with open(lowerCamelCase__ , 'wb' ) as fi: __lowerCamelCase = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (str(lowerCamelCase__ ), str(lowerCamelCase__ )) def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor: """simple docstring""" __lowerCamelCase = sentencepiece.SentencePieceProcessor(**UpperCamelCase__ ) spm.Load(str(UpperCamelCase__ ) ) return spm def lowerCamelCase_ ( UpperCamelCase__ : str ) -> Union[Dict, List]: """simple docstring""" with open(UpperCamelCase__ , 'r' ) as f: return json.load(UpperCamelCase__ ) def lowerCamelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : str ) -> None: """simple docstring""" with open(UpperCamelCase__ , 'w' ) as f: json.dump(UpperCamelCase__ , UpperCamelCase__ , indent=2 )
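# Minimal illustration (not part of the original file) of the
# build_inputs_with_special_tokens contract above: language-code prefix
# token(s), then the sequence, then EOS. The ids are made up.
_prefix_tokens = [256_000]  # e.g. the id of a "<lang:fr>" token
_eos_token_id = 2
_token_ids = [87, 431, 9]
assert _prefix_tokens + _token_ids + [_eos_token_id] == [256_000, 87, 431, 9, 2]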
90
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
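# Condensed illustration (not part of the test file) of the three-stage
# DiffEdit flow exercised by the slow tests above; `pipe`, `img`, `src` and
# `tgt` are assumed to exist as in those tests.
# mask = pipe.generate_mask(image=img, source_prompt=src, target_prompt=tgt)
# latents = pipe.invert(prompt=src, image=img, inpaint_strength=0.7).latents
# result = pipe(prompt=tgt, mask_image=mask, image_latents=latents, negative_prompt=src).images[0]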
265
0
"""simple docstring""" def _A (__a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : list[list[str]] = [[] for _ in range(__a )] SCREAMING_SNAKE_CASE_ : str = key - 1 if key <= 0: raise ValueError('''Height of grid can\'t be 0 or negative''' ) if key == 1 or len(__a ) <= key: return input_string for position, character in enumerate(__a ): SCREAMING_SNAKE_CASE_ : Optional[int] = position % (lowest * 2) # puts it in bounds SCREAMING_SNAKE_CASE_ : int = min(__a , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append(__a ) SCREAMING_SNAKE_CASE_ : List[str] = [''''''.join(__a ) for row in temp_grid] SCREAMING_SNAKE_CASE_ : Any = ''''''.join(__a ) return output_string def _A (__a , __a ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = [] SCREAMING_SNAKE_CASE_ : List[str] = key - 1 if key <= 0: raise ValueError('''Height of grid can\'t be 0 or negative''' ) if key == 1: return input_string SCREAMING_SNAKE_CASE_ : list[list[str]] = [[] for _ in range(__a )] # generates template for position in range(len(__a ) ): SCREAMING_SNAKE_CASE_ : str = position % (lowest * 2) # puts it in bounds SCREAMING_SNAKE_CASE_ : Tuple = min(__a , lowest * 2 - num ) # creates zigzag pattern temp_grid[num].append('''*''' ) SCREAMING_SNAKE_CASE_ : Tuple = 0 for row in temp_grid: # fills in the characters SCREAMING_SNAKE_CASE_ : str = input_string[counter : counter + len(__a )] grid.append(list(__a ) ) counter += len(__a ) SCREAMING_SNAKE_CASE_ : Any = '''''' # reads as zigzag for position in range(len(__a ) ): SCREAMING_SNAKE_CASE_ : Dict = position % (lowest * 2) # puts it in bounds SCREAMING_SNAKE_CASE_ : Tuple = min(__a , lowest * 2 - num ) # creates zigzag pattern output_string += grid[num][0] grid[num].pop(0 ) return output_string def _A (__a ) -> dict[int, str]: """simple docstring""" SCREAMING_SNAKE_CASE_ : Tuple = {} for key_guess in range(1 , len(__a ) ): # tries every key SCREAMING_SNAKE_CASE_ : List[Any] = decrypt(__a , __a ) return results if __name__ == "__main__": import doctest doctest.testmod()
91
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]: UpperCAmelCase : Dict = {} for old_key in state_dict.keys(): UpperCAmelCase : str = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase : str = state_dict[old_key] return new_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple: UpperCAmelCase : Any = [] UpperCAmelCase : Dict = 0 os.makedirs(_lowercase , exist_ok=_lowercase ) for expert in range(_lowercase ): UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowercase ): UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : Optional[Any] = os.path.join( _lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) torch.save(_lowercase , _lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowercase )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy 
model/experts saved on the same file) if len(_lowercase ) == 1: UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase ) torch.save(_lowercase , _lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowercase , _lowercase ) # Otherwise, let's build the index UpperCAmelCase : Optional[int] = {} for idx, shard in enumerate(_lowercase ): UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' ) UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) ) for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : Any = {"""total_size""": total_size} UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n""" f.write(_lowercase ) return metadata, index if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a : int = parser.parse_args() a , a : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) a : str = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
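# Small sanity check (not part of the original file) of the helper imported
# above and used when accumulating `total_size`.
assert dtype_byte_size(torch.float32) == 4  # 32 bits -> 4 bytes per element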
265
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase__ = logging.get_logger(__name__)

UpperCamelCase__ = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}


class a__(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        state_dim=17,
        act_dim=4,
        hidden_size=128,
        max_ep_len=4096,
        action_tanh=True,
        vocab_size=1,
        n_positions=1024,
        n_layer=3,
        n_head=1,
        n_inner=None,
        activation_function="relu",
        resid_pdrop=0.1,
        embd_pdrop=0.1,
        attn_pdrop=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        scale_attn_weights=True,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False,
        reorder_and_upcast_attn=False,
        **kwargs,
    ):
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
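# Illustrative usage (not part of the original file): default construction,
# with a generic attribute resolved through attribute_map above.
cfg = a__()
assert cfg.n_positions == 1024
assert cfg.max_position_embeddings == 1024  # mapped to n_positions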
92
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
265
0
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]


if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
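# Illustrative effect (not part of the original file): at import time the
# module object is replaced by the _LazyModule above, so the first attribute
# access triggers the real submodule import.
# from transformers.models.nllb_moe import NllbMoeConfig  # resolved lazily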
93
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def __lowerCamelCase ( ) -> Any: raise RuntimeError("""CUDA out of memory.""" ) class UpperCamelCase_ ( nn.Module ): def __init__( self ) -> Any: super().__init__() UpperCAmelCase : Tuple = nn.Linear(3 , 4 ) UpperCAmelCase : Tuple = nn.BatchNormad(4 ) UpperCAmelCase : int = nn.Linear(4 , 5 ) def _lowercase( self , A ) -> Any: return self.lineara(self.batchnorm(self.lineara(A ) ) ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[int] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(A , [128, 64, 32, 16, 8] ) def _lowercase( self ) -> Any: UpperCAmelCase : Optional[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga UpperCAmelCase , UpperCAmelCase : Optional[int] = mock_training_loop_function("""hello""" ) self.assertListEqual(A , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, """hello"""] ) def _lowercase( self ) -> Any: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(A ): pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[int]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A , A ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(A ) as cm: mock_training_loop_function(128 , """hello""" , """world""" ) self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] ) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] ) def _lowercase( self ) -> int: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] ) @require_cuda def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = torch.cuda.memory_allocated() UpperCAmelCase : List[str] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , A ) UpperCAmelCase : Tuple = release_memory(A ) self.assertEqual(torch.cuda.memory_allocated() , A )
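# Minimal standalone illustration (not part of the test class) of the
# decorator under test: the batch size is halved on a (simulated) OOM until
# the wrapped function succeeds.
@find_executable_batch_size(starting_batch_size=64)
def _demo(batch_size):
    if batch_size > 16:
        raise RuntimeError("CUDA out of memory.")
    return batch_size

assert _demo() == 16  # 64 -> 32 -> 16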
265
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) if is_sentencepiece_available(): from ..ta.tokenization_ta import TaTokenizer else: from ...utils.dummy_sentencepiece_objects import TaTokenizer snake_case : Dict = TaTokenizer if is_tokenizers_available(): from ..ta.tokenization_ta_fast import TaTokenizerFast else: from ...utils.dummy_tokenizers_objects import TaTokenizerFast snake_case : Dict = TaTokenizerFast snake_case : str = {'''configuration_mt5''': ['''MT5Config''', '''MT5OnnxConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : Dict = [ '''MT5EncoderModel''', '''MT5ForConditionalGeneration''', '''MT5ForQuestionAnswering''', '''MT5Model''', '''MT5PreTrainedModel''', '''MT5Stack''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : List[str] = ['''TFMT5EncoderModel''', '''TFMT5ForConditionalGeneration''', '''TFMT5Model'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: snake_case : Dict = ['''FlaxMT5EncoderModel''', '''FlaxMT5ForConditionalGeneration''', '''FlaxMT5Model'''] if TYPE_CHECKING: from .configuration_mta import MTaConfig, MTaOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mta import ( MTaEncoderModel, MTaForConditionalGeneration, MTaForQuestionAnswering, MTaModel, MTaPreTrainedModel, MTaStack, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel else: import sys snake_case : Any = _LazyModule( __name__, globals()['''__file__'''], _import_structure, extra_objects={'''MT5Tokenizer''': MTaTokenizer, '''MT5TokenizerFast''': MTaTokenizerFast}, module_spec=__spec__, )
94
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
265
0
def dodecahedron_surface_area(edge: float) -> float:
    """simple docstring"""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """simple docstring"""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
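# Worked check (not part of the original file): edge = 1 gives the classic
# unit-dodecahedron constants.
assert abs(dodecahedron_surface_area(1) - 20.645728807) < 1e-6
assert abs(dodecahedron_volume(1) - 7.663118961) < 1e-6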
95
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
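A minimal, self-contained sketch of the byte-level BPE fixture the tests above build: write a toy vocab.json and merges.txt, instantiate the slow tokenizer from them, and reproduce the token split the tests assert. The tiny vocabulary and merge list are copied from setUp(); the temp-file handling is illustrative.

import json
import os
import tempfile

from transformers import LongformerTokenizer

vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
         "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low",
         "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>"]
merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")
merges_file = os.path.join(tmpdir, "merges.txt")
with open(vocab_file, "w", encoding="utf-8") as fp:
    json.dump({token: i for i, token in enumerate(vocab)}, fp)
with open(merges_file, "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))

tokenizer = LongformerTokenizer(vocab_file, merges_file, unk_token="<unk>")
# "e r" is the only merge that fires here, so "lower newer" splits exactly
# as the test above asserts
assert tokenizer.tokenize("lower newer") == ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]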
265
0
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() lowercase__ = logging.get_logger() @dataclass class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = field(default_factory=lowercase ) lowerCamelCase__ = field(default_factory=lowercase ) def A_ ( self , lowercase , lowercase , lowercase ): _lowerCamelCase : Any = len(list(m.modules() ) ) == 1 or isinstance(lowercase , nn.Convad ) or isinstance(lowercase , nn.BatchNormad ) if has_not_submodules: self.traced.append(lowercase ) def __call__( self , lowercase ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(lowercase ) [x.remove() for x in self.handles] return self @property def A_ ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda lowercase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowerCAmelCase__ : '''simple docstring''' lowerCamelCase__ = 42 lowerCamelCase__ = 42 lowerCamelCase__ = 0 lowerCamelCase__ = field(default_factory=lowercase ) lowerCamelCase__ = field(default_factory=lowercase ) def __call__( self , lowercase ): _lowerCamelCase : Union[str, Any] = Tracker(self.dest )(lowercase ).parametrized _lowerCamelCase : Optional[Any] = Tracker(self.src )(lowercase ).parametrized _lowerCamelCase : Tuple = list(filter(lambda lowercase : type(lowercase ) not in self.src_skip , lowercase ) ) _lowerCamelCase : Optional[Any] = list(filter(lambda lowercase : type(lowercase ) not in self.dest_skip , lowercase ) ) if len(lowercase ) != len(lowercase ): raise Exception( F'''Numbers of operations are different. Source module has {len(lowercase )} operations while''' F''' destination module has {len(lowercase )}.''' ) for dest_m, src_m in zip(lowercase , lowercase ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def _snake_case ( lowercase__ , lowercase__ , lowercase__ , lowercase__ = True ): print(f'''Converting {name}...''' ) with torch.no_grad(): _lowerCamelCase : List[str] = timm.create_model(lowercase__ , pretrained=lowercase__ ).eval() _lowerCamelCase : List[Any] = ResNetForImageClassification(lowercase__ ).eval() _lowerCamelCase : Union[str, Any] = ModuleTransfer(src=lowercase__ , dest=lowercase__ ) _lowerCamelCase : int = torch.randn((1, 3, 224, 224) ) module_transfer(lowercase__ ) assert torch.allclose(from_model(lowercase__ ) , our_model(lowercase__ ).logits ), "The model logits don't match the original one." 
_lowerCamelCase : Optional[int] = f'''resnet{'-'.join(name.split('resnet' ) )}''' print(lowercase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=lowercase__ , ) # we can use the convnext one _lowerCamelCase : Optional[Any] = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=lowercase__ , ) print(f'''Pushed {checkpoint_name}''' ) def _snake_case ( lowercase__ , lowercase__ = None , lowercase__ = True ): _lowerCamelCase : Dict = 'imagenet-1k-id2label.json' _lowerCamelCase : List[Any] = 1000 _lowerCamelCase : List[str] = (1, num_labels) _lowerCamelCase : int = 'huggingface/label-files' _lowerCamelCase : int = num_labels _lowerCamelCase : int = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) ) _lowerCamelCase : Optional[Any] = {int(lowercase__ ): v for k, v in idalabel.items()} _lowerCamelCase : Union[str, Any] = idalabel _lowerCamelCase : Any = {v: k for k, v in idalabel.items()} _lowerCamelCase : str = partial(lowercase__ , num_labels=lowercase__ , idalabel=lowercase__ , labelaid=lowercase__ ) _lowerCamelCase : Optional[Any] = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(lowercase__ , names_to_config[model_name] , lowercase__ , lowercase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) return config, expected_shape if __name__ == "__main__": lowercase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) lowercase__ = parser.parse_args() lowercase__ = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
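The Tracker/ModuleTransfer pair above records each model's leaf modules in forward order and copies state dicts pairwise. A minimal sketch of the resulting end state, using two toy modules with matching layouts rather than the timm and HF ResNets:

import torch
import torch.nn as nn

src = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
dst = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())

# copy every parameter and buffer from the source into the destination
dst.load_state_dict(src.state_dict())

src.eval()
dst.eval()
x = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    # the same equality check the conversion script performs on the logits
    assert torch.allclose(src(x), dst(x)), "The model outputs don't match."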
96
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
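For orientation, a short usage sketch of the pipeline behaviour these tests exercise (it assumes network access to the Hub; the tiny checkpoint name is the one the tests themselves use):

from transformers import pipeline

classifier = pipeline(
    "zero-shot-classification",
    model="sshleifer/tiny-distilbert-base-cased-distilled-squad",
)
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
# labels come back sorted by descending score, and the scores sum to ~1.0
print(result["labels"], result["scores"])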
265
0
'''simple docstring''' import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging __snake_case = logging.get_logger(__name__) def a ( __a , __a , __a , __a=False ) -> Dict: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise if not is_sharded: UpperCamelCase__ :Optional[Any] = os.path.abspath(__a ) logger.info(f'''Loading PyTorch weights from {pt_path}''' ) UpperCamelCase__ :Tuple = torch.load(__a , map_location='''cpu''' ) logger.info(f'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) UpperCamelCase__ :List[Any] = convert_pytorch_state_dict_to_flax(__a , __a ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files UpperCamelCase__ :Union[str, Any] = convert_pytorch_sharded_state_dict_to_flax(__a , __a ) return flax_state_dict def a ( __a , __a , __a , __a , ) -> (Tuple[str], np.ndarray): '''simple docstring''' def is_key_or_prefix_key_in_dict(__a ) -> bool: return len(set(__a ) & {key, (model_prefix,) + key} ) > 0 # layer norm UpperCamelCase__ :Dict = pt_tuple_key[:-1] + ('''scale''',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__a ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean UpperCamelCase__ :Dict = pt_tuple_key[:-1] + ('''mean''',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__a ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var UpperCamelCase__ :Optional[Any] = pt_tuple_key[:-1] + ('''var''',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__a ): return renamed_pt_tuple_key, pt_tensor # embedding UpperCamelCase__ :Optional[int] = pt_tuple_key[:-1] + ('''embedding''',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__a ): return renamed_pt_tuple_key, pt_tensor # conv layer UpperCamelCase__ :Tuple = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__a ): UpperCamelCase__ :int = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer UpperCamelCase__ :Tuple = pt_tuple_key[:-1] + ('''kernel''',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__a ): UpperCamelCase__ :str = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight UpperCamelCase__ :List[str] = pt_tuple_key[:-1] + ('''weight''',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias UpperCamelCase__ :List[Any] = pt_tuple_key[:-1] + ('''bias''',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 UpperCamelCase__ :Tuple = None if pt_tuple_key[-3::2] == ("parametrizations", "original0"): UpperCamelCase__ :Tuple = pt_tuple_key[-2] + '''_g''' elif pt_tuple_key[-3::2] == ("parametrizations", "original1"): UpperCamelCase__ :Union[str, Any] = pt_tuple_key[-2] + '''_v''' if name is not None: UpperCamelCase__ :Tuple = 
pt_tuple_key[:-3] + (name,) return renamed_pt_tuple_key, pt_tensor return pt_tuple_key, pt_tensor def a ( __a , __a ) -> Tuple: '''simple docstring''' UpperCamelCase__ :Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} UpperCamelCase__ :List[str] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers if "params" in flax_model.params: UpperCamelCase__ :Optional[Any] = flax_model.params['''params'''] else: UpperCamelCase__ :Optional[Any] = flax_model.params UpperCamelCase__ :Union[str, Any] = flatten_dict(__a ) # add batch_stats keys,values to dict if "batch_stats" in flax_model.params: UpperCamelCase__ :List[str] = flatten_dict(flax_model.params['''batch_stats'''] ) random_flax_state_dict.update(__a ) UpperCamelCase__ :Any = {} UpperCamelCase__ :Optional[int] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) UpperCamelCase__ :Union[str, Any] = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCamelCase__ :int = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary UpperCamelCase__ :Union[str, Any] = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: UpperCamelCase__ :List[str] = pt_tuple_key[1:] # Correctly rename weight parameters UpperCamelCase__ , UpperCamelCase__ :int = rename_key_and_reshape_tensor( __a , __a , __a , __a ) # add model prefix if necessary UpperCamelCase__ :int = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: UpperCamelCase__ :Optional[Any] = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1] or "var" in flax_key[-1]: UpperCamelCase__ :Union[str, Any] = jnp.asarray(__a ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__a , __a ) continue # also add unexpected weight so that warning is thrown UpperCamelCase__ :Tuple = jnp.asarray(__a ) else: # also add unexpected weight so that warning is thrown UpperCamelCase__ :Union[str, Any] = jnp.asarray(__a ) return unflatten_dict(__a ) def a ( __a , __a ) -> List[str]: '''simple docstring''' import torch # Load the index UpperCamelCase__ :int = {} for shard_file in shard_filenames: # load using msgpack utils UpperCamelCase__ :List[Any] = torch.load(__a ) UpperCamelCase__ :List[Any] = {k: v.numpy() for k, v in pt_state_dict.items()} UpperCamelCase__ :List[Any] = flax_model.base_model_prefix # use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict if "batch_stats" in flax_model.params: UpperCamelCase__ :int = flax_model.params['''params'''] UpperCamelCase__ :Union[str, Any] = flatten_dict(__a ) random_flax_state_dict.update(flatten_dict(flax_model.params['''batch_stats'''] ) ) else: UpperCamelCase__ :Dict = flax_model.params UpperCamelCase__ :int = flatten_dict(__a ) UpperCamelCase__ :List[str] = (model_prefix not in flax_model_params) and ( model_prefix in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) UpperCamelCase__ :int = (model_prefix in flax_model_params) and ( model_prefix not in {k.split('''.''' )[0] for k in pt_state_dict.keys()} ) # Need to change some parameters name to match Flax names for pt_key, pt_tensor in pt_state_dict.items(): UpperCamelCase__ :str = tuple(pt_key.split('''.''' ) ) # remove base model prefix if necessary UpperCamelCase__ :Dict = pt_tuple_key[0] == model_prefix if load_model_with_head_into_base_model and has_base_model_prefix: UpperCamelCase__ :Dict = pt_tuple_key[1:] # Correctly rename weight parameters UpperCamelCase__ , UpperCamelCase__ :Optional[Any] = rename_key_and_reshape_tensor( __a , __a , __a , __a ) # add model prefix if necessary UpperCamelCase__ :Any = (model_prefix,) + flax_key in random_flax_state_dict if load_base_model_into_model_with_head and require_base_model_prefix: UpperCamelCase__ :List[str] = (model_prefix,) + flax_key if flax_key in random_flax_state_dict: if flax_tensor.shape != random_flax_state_dict[flax_key].shape: raise ValueError( f'''PyTorch checkpoint seems to be incorrect. 
Weight {pt_key} was expected to be of shape ''' f'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) # add batch stats if the model contains batchnorm layers if "batch_stats" in flax_model.params: if "mean" in flax_key[-1]: UpperCamelCase__ :List[Any] = jnp.asarray(__a ) continue if "var" in flax_key[-1]: UpperCamelCase__ :List[Any] = jnp.asarray(__a ) continue # remove num_batches_tracked key if "num_batches_tracked" in flax_key[-1]: flax_state_dict.pop(__a , __a ) continue # also add unexpected weight so that warning is thrown UpperCamelCase__ :str = jnp.asarray(__a ) else: # also add unexpected weight so that warning is thrown UpperCamelCase__ :List[Any] = jnp.asarray(__a ) return unflatten_dict(__a ) def a ( __a , __a ) -> Tuple: '''simple docstring''' UpperCamelCase__ :Tuple = os.path.abspath(__a ) logger.info(f'''Loading Flax weights from {flax_checkpoint_path}''' ) # import correct flax class UpperCamelCase__ :str = getattr(__a , '''Flax''' + model.__class__.__name__ ) # load flax weight dict with open(__a , '''rb''' ) as state_f: try: UpperCamelCase__ :Tuple = from_bytes(__a , state_f.read() ) except UnpicklingError: raise EnvironmentError(f'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' ) return load_flax_weights_in_pytorch_model(__a , __a ) def a ( __a , __a ) -> int: '''simple docstring''' try: import torch # noqa: F401 except ImportError: logger.error( '''Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see''' ''' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation''' ''' instructions.''' ) raise # check if we have bf16 weights UpperCamelCase__ :List[Any] = flatten_dict(jax.tree_util.tree_map(lambda __a : x.dtype == jnp.bfloataa , __a ) ).values() if any(__a ): # convert all weights to fp32 if the are bf16 since torch.from_numpy can-not handle bf16 # and bf16 is not fully supported in PT yet. logger.warning( '''Found ``bfloat16`` weights in Flax model. 
Casting all ``bfloat16`` weights to ``float32`` ''' '''before loading those in PyTorch model.''' ) UpperCamelCase__ :str = jax.tree_util.tree_map( lambda __a : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __a ) UpperCamelCase__ :Optional[Any] = flatten_dict(__a ) UpperCamelCase__ :str = pt_model.state_dict() UpperCamelCase__ :List[Any] = (pt_model.base_model_prefix in flax_state) and ( pt_model.base_model_prefix not in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) UpperCamelCase__ :Optional[int] = (pt_model.base_model_prefix not in flax_state) and ( pt_model.base_model_prefix in {k.split('''.''' )[0] for k in pt_model_dict.keys()} ) # keep track of unexpected & missing keys UpperCamelCase__ :Optional[Any] = [] UpperCamelCase__ :str = set(pt_model_dict.keys() ) for flax_key_tuple, flax_tensor in flax_state_dict.items(): UpperCamelCase__ :Tuple = flax_key_tuple[0] == pt_model.base_model_prefix UpperCamelCase__ :int = '''.'''.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict # adapt flax_key to prepare for loading from/to base model only if load_model_with_head_into_base_model and has_base_model_prefix: UpperCamelCase__ :str = flax_key_tuple[1:] elif load_base_model_into_model_with_head and require_base_model_prefix: UpperCamelCase__ :List[str] = (pt_model.base_model_prefix,) + flax_key_tuple # rename flax weights to PyTorch format if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__a ) not in pt_model_dict: # conv layer UpperCamelCase__ :Any = flax_key_tuple[:-1] + ('''weight''',) UpperCamelCase__ :Tuple = jnp.transpose(__a , (3, 2, 0, 1) ) elif flax_key_tuple[-1] == "kernel" and ".".join(__a ) not in pt_model_dict: # linear layer UpperCamelCase__ :Dict = flax_key_tuple[:-1] + ('''weight''',) UpperCamelCase__ :Tuple = flax_tensor.T elif flax_key_tuple[-1] in ["scale", "embedding"]: UpperCamelCase__ :Union[str, Any] = flax_key_tuple[:-1] + ('''weight''',) # adding batch stats from flax batch norm to pt elif "mean" in flax_key_tuple[-1]: UpperCamelCase__ :Optional[Any] = flax_key_tuple[:-1] + ('''running_mean''',) elif "var" in flax_key_tuple[-1]: UpperCamelCase__ :List[Any] = flax_key_tuple[:-1] + ('''running_var''',) if "batch_stats" in flax_state: UpperCamelCase__ :Optional[int] = '''.'''.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header else: UpperCamelCase__ :Any = '''.'''.join(__a ) # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation. UpperCamelCase__ :Dict = {} # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030 for key in pt_model_dict: UpperCamelCase__ :Dict = key.split('''.''' ) UpperCamelCase__ :Union[str, Any] = None if key_components[-3::2] == ["parametrizations", "original0"]: UpperCamelCase__ :Union[str, Any] = key_components[-2] + '''_g''' elif key_components[-3::2] == ["parametrizations", "original1"]: UpperCamelCase__ :List[Any] = key_components[-2] + '''_v''' if name is not None: UpperCamelCase__ :Dict = key_components[:-3] + [name] UpperCamelCase__ :Optional[int] = '''.'''.join(__a ) UpperCamelCase__ :Any = key if flax_key in special_pt_names: UpperCamelCase__ :Tuple = special_pt_names[flax_key] if flax_key in pt_model_dict: if flax_tensor.shape != pt_model_dict[flax_key].shape: raise ValueError( f'''Flax checkpoint seems to be incorrect. 
Weight {flax_key_tuple} was expected ''' f'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' ) else: # add weight to pytorch dict UpperCamelCase__ :Dict = np.asarray(__a ) if not isinstance(__a , np.ndarray ) else flax_tensor UpperCamelCase__ :Optional[int] = torch.from_numpy(__a ) # remove from missing keys missing_keys.remove(__a ) else: # weight is not expected by PyTorch model unexpected_keys.append(__a ) pt_model.load_state_dict(__a ) # re-transform missing_keys to list UpperCamelCase__ :List[str] = list(__a ) if len(__a ) > 0: logger.warning( '''Some weights of the Flax model were not used when initializing the PyTorch model''' f''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing''' f''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture''' ''' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This''' f''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect''' ''' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a''' ''' FlaxBertForSequenceClassification model).''' ) else: logger.warning(f'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' ) if len(__a ) > 0: logger.warning( f'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly''' f''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to''' ''' use it for predictions and inference.''' ) else: logger.warning( f'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n''' '''If your task is similar to the task the model of the checkpoint was trained on, ''' f'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' ) return pt_model
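The central renaming rule above comes down to layout differences between the two frameworks; a minimal sketch of the linear-layer case (the key names and shapes here are illustrative):

import numpy as np

# PyTorch nn.Linear stores `weight` with shape (out_features, in_features);
# Flax nn.Dense stores `kernel` with shape (in_features, out_features).
pt_key = ("dense", "weight")
pt_tensor = np.random.randn(4, 3)

flax_key = pt_key[:-1] + ("kernel",)  # rename the last key component
flax_tensor = pt_tensor.T             # and transpose the matrix

assert flax_key == ("dense", "kernel")
assert flax_tensor.shape == (3, 4)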
97
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a : List[Any] = """__DUMMY_TRANSFORMERS_USER__""" a : Tuple = """Dummy User""" a : Optional[Any] = """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""" a : Optional[Any] = """https://hub-ci.huggingface.co""" a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" a : str = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Optional[int]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: HfFolder.save_token(_lowercase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> str: return HfApi(endpoint=_lowercase ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : str = HfFolder.get_token() HfFolder.save_token(_lowercase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: def _cleanup_repo(_lowercase ): hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: @contextmanager def _temporary_repo(_lowercase ): try: yield repo_id finally: cleanup_repo(_lowercase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and 
token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_zipped_img_data_
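All of the fixtures above follow pytest's yield pattern: code before the yield is setup, the yielded value goes to the test, and code after the yield is teardown. A minimal sketch with a throwaway file standing in for a Hub repo:

import pytest

@pytest.fixture
def temp_token(tmp_path):
    token_file = tmp_path / "token"          # setup: create a dummy token file
    token_file.write_text("hf_dummy_token")
    yield token_file.read_text()             # handed to the test as `temp_token`
    token_file.unlink()                      # teardown: runs after the test

def test_temp_token(temp_token):
    assert temp_token.startswith("hf_")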
265
0
"""simple docstring""" from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_herbert import HerbertTokenizer lowerCAmelCase__ : Tuple = logging.get_logger(__name__) lowerCAmelCase__ : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} lowerCAmelCase__ : Tuple = { 'vocab_file': { 'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json' }, 'merges_file': { 'allegro/herbert-base-cased': 'https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt' }, } lowerCAmelCase__ : Any = {'allegro/herbert-base-cased': 514} lowerCAmelCase__ : Optional[Any] = {} class snake_case ( __UpperCAmelCase ): """simple docstring""" snake_case__ = VOCAB_FILES_NAMES snake_case__ = PRETRAINED_VOCAB_FILES_MAP snake_case__ = PRETRAINED_INIT_CONFIGURATION snake_case__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES snake_case__ = HerbertTokenizer def __init__( self : Tuple ,lowerCamelCase__ : int=None ,lowerCamelCase__ : int=None ,lowerCamelCase__ : Optional[Any]=None ,lowerCamelCase__ : Union[str, Any]="<s>" ,lowerCamelCase__ : Union[str, Any]="<unk>" ,lowerCamelCase__ : str="<pad>" ,lowerCamelCase__ : List[Any]="<mask>" ,lowerCamelCase__ : List[str]="</s>" ,**lowerCamelCase__ : Optional[int] ,): super().__init__( lowerCamelCase__ ,lowerCamelCase__ ,tokenizer_file=lowerCamelCase__ ,cls_token=lowerCamelCase__ ,unk_token=lowerCamelCase__ ,pad_token=lowerCamelCase__ ,mask_token=lowerCamelCase__ ,sep_token=lowerCamelCase__ ,**lowerCamelCase__ ,) def __lowerCAmelCase ( self : List[str] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ): UpperCAmelCase__ = [self.cls_token_id] UpperCAmelCase__ = [self.sep_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def __lowerCAmelCase ( self : List[Any] ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ,lowerCamelCase__ : bool = False ): if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ ,token_ids_a=lowerCamelCase__ ,already_has_special_tokens=lowerCamelCase__ ) if token_ids_a is None: return [1] + ([0] * len(lowerCamelCase__ )) + [1] return [1] + ([0] * len(lowerCamelCase__ )) + [1] + ([0] * len(lowerCamelCase__ )) + [1] def __lowerCAmelCase ( self : int ,lowerCamelCase__ : List[int] ,lowerCamelCase__ : Optional[List[int]] = None ): UpperCAmelCase__ = [self.sep_token_id] UpperCAmelCase__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCAmelCase ( self : Optional[int] ,lowerCamelCase__ : str ,lowerCamelCase__ : Optional[str] = None ): UpperCAmelCase__ = self._tokenizer.model.save(lowerCamelCase__ ,name=lowerCamelCase__ ) return tuple(lowerCamelCase__ )
98
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , **A ) -> List[str]: super().__init__(**A ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , A , **A ) -> Optional[Any]: return super().__call__(A , **A ) def _lowercase( self , **A ) -> Optional[Any]: UpperCAmelCase : List[Any] = {} if "candidate_labels" in kwargs: UpperCAmelCase : Dict = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]: UpperCAmelCase : int = load_image(A ) UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) UpperCAmelCase : List[str] = candidate_labels UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels] UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A ) UpperCAmelCase : Union[str, Any] = [text_inputs] return inputs def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" ) UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , A ): UpperCAmelCase : Optional[Any] = text_inputs[0] else: # Batching case. UpperCAmelCase : Any = text_inputs[0][0] UpperCAmelCase : Dict = self.model(**A , **A ) UpperCAmelCase : List[Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def _lowercase( self , A ) -> Union[str, Any]: UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" ) UpperCAmelCase : int = model_outputs["""logits"""][0] if self.framework == "pt": UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) UpperCAmelCase : Any = probs.tolist() if not isinstance(A , A ): UpperCAmelCase : Any = [scores] elif self.framework == "tf": UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 ) UpperCAmelCase : Union[str, Any] = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase : Any = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -x[0] ) ] return result
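The postprocess step above softmaxes the per-label image-text logits and pairs them with the candidate labels; a dependency-free sketch of that step:

import math

def postprocess(logits, candidate_labels):
    exps = [math.exp(l) for l in logits]
    total = sum(exps)
    scores = [e / total for e in exps]  # softmax over the label axis
    return sorted(
        ({"score": s, "label": lab} for s, lab in zip(scores, candidate_labels)),
        key=lambda d: -d["score"],
    )

# highest-probability label first, scores summing to 1.0
print(postprocess([2.0, 0.5, 0.1], ["cat", "dog", "car"]))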
265
0
# Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position lowercase : str = """2.13.1""" import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse("""3.7"""): raise ImportWarning( """To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition.""" ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( """To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n""" """If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.""" ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip lowercase : Optional[Any] = concatenate_datasets lowercase : List[Any] = DownloadConfig lowercase : List[str] = DownloadManager lowercase : int = DownloadMode lowercase : Optional[Any] = DownloadConfig lowercase : Union[str, Any] = DownloadMode lowercase : Dict = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
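A short usage sketch of the public API this __init__ re-exports (it assumes the package is installed and the Hub is reachable; the dataset name is purely illustrative):

from datasets import load_dataset

ds = load_dataset("glue", "mrpc", split="train")
print(len(ds), ds[0]["sentence1"])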
99
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __lowerCamelCase ( _lowercase ) -> Optional[Any]: return getitem, k def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]: return setitem, k, v def __lowerCamelCase ( _lowercase ) -> int: return delitem, k def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]: try: return fun(_lowercase , *_lowercase ), None except Exception as e: return None, e a : List[str] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) a : List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] a : int = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] a : List[Any] = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] a : Tuple = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] a : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : List[str] = HashMap(initial_block_size=4 ) UpperCAmelCase : Dict = {} for _, (fun, *args) in enumerate(_lowercase ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase ) UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase ) assert my_res == py_res assert str(_lowercase ) == str(_lowercase ) assert set(_lowercase ) == set(_lowercase ) assert len(_lowercase ) == len(_lowercase ) assert set(my.items() ) == set(py.items() ) def __lowerCamelCase ( ) -> List[Any]: def is_public(_lowercase ) -> bool: return not name.startswith("""_""" ) UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )} UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )} assert dict_public_names > hash_public_names
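The file above is a differential test: every operation runs against both HashMap and a built-in dict, and the results, raised exceptions, and views must all agree. A compact sketch of the same idea (it assumes the data_structures package the test imports is on the path):

from data_structures.hashing.hash_map import HashMap

my, py = HashMap(initial_block_size=4), {}
for key in range(5):          # enough inserts to force a resize up
    my[key] = key * 2
    py[key] = key * 2

assert len(my) == len(py)
assert set(my.items()) == set(py.items())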
265
0
"""simple docstring""" # Author: OMKAR PATHAK, Nwachukwu Chidiebere # Use a Python dictionary to construct the graph. from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar __magic_name__ = TypeVar("T") class SCREAMING_SNAKE_CASE_ ( Generic[T] ): """simple docstring""" def __init__( self , lowerCAmelCase__ = True): __SCREAMING_SNAKE_CASE = {} # dictionary of lists __SCREAMING_SNAKE_CASE = directed def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCAmelCase__) self.adj_list[destination_vertex].append(lowerCAmelCase__) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: __SCREAMING_SNAKE_CASE = [destination_vertex] __SCREAMING_SNAKE_CASE = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCAmelCase__) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: __SCREAMING_SNAKE_CASE = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: __SCREAMING_SNAKE_CASE = [destination_vertex] __SCREAMING_SNAKE_CASE = [] return self def __repr__( self): return pformat(self.adj_list)
100
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : str = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): lowercase = 'facebook/mbart-large-en-ro' lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def _lowercase( cls ) -> Tuple: UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCAmelCase : int = 1 return cls def _lowercase( self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def _lowercase( 
self ) -> List[str]: self.assertIn(A , self.tokenizer.all_special_ids ) UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A ) UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , A ) UpperCAmelCase : int = 10 UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def _lowercase( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = tempfile.mkdtemp() UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" ) UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" ) UpperCAmelCase : Dict = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" ) UpperCAmelCase : Dict = targets["""input_ids"""] UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
265
0
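A quick usage sketch of the dict-of-lists representation used by the record above; the vertex labels here are illustrative, not taken from the record:

from pprint import pformat

# Build a small undirected graph with the same dict-of-lists representation.
adj_list: dict[str, list[str]] = {}
for source, destination in [("a", "b"), ("b", "c"), ("a", "c")]:
    adj_list.setdefault(source, []).append(destination)
    adj_list.setdefault(destination, []).append(source)

print(pformat(adj_list))  # {'a': ['b', 'c'], 'b': ['a', 'c'], 'c': ['b', 'a']}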
import re


def dna(dna: str) -> str:
    """Returns the complementary strand of the given DNA sequence (A<->T, C<->G).

    >>> dna("GCTA")
    'CGAT'
    """
    if len(re.findall("[ATCG]", dna)) != len(dna):
        raise ValueError("Invalid Strand")

    return dna.translate(dna.maketrans("ATCG", "TAGC"))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
101
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
265
0
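The DNA record above reduces to a single `str.translate` call; a self-contained check of the complement mapping (the example strands are invented here):

# A pairs with T and C pairs with G; str.maketrans builds the translation table.
complement_table = str.maketrans("ATCG", "TAGC")
assert "GCTA".translate(complement_table) == "CGAT"
assert "ATGC".translate(complement_table) == "TACG"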
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging SCREAMING_SNAKE_CASE : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name class _UpperCAmelCase ( __snake_case ): '''simple docstring''' def __init__(self , a_ , a_ , a_ , a_ , a_ , a_ , a_ , ): '''simple docstring''' super().__init__() self.register_modules( vae=a_ , text_encoder=a_ , tokenizer=a_ , unet=a_ , scheduler=a_ , safety_checker=a_ , feature_extractor=a_ , ) def SCREAMING_SNAKE_CASE (self , a_ = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __snake_case : Optional[int] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(a_ ) def SCREAMING_SNAKE_CASE (self ): '''simple docstring''' self.enable_attention_slicing(a_ ) @torch.no_grad() def __call__(self , a_ , a_ = 5_12 , a_ = 5_12 , a_ = 50 , a_ = 7.5 , a_ = None , a_ = 1 , a_ = 0.0 , a_ = None , a_ = None , a_ = "pil" , a_ = True , a_ = None , a_ = 1 , a_ = None , **a_ , ): '''simple docstring''' if isinstance(a_ , a_ ): __snake_case : Any = 1 elif isinstance(a_ , a_ ): __snake_case : Any = len(a_ ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(a_ )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(a_ , a_ ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(a_ )}.""" ) # get prompt text embeddings __snake_case : int = self.tokenizer( a_ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) __snake_case : int = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __snake_case : List[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) __snake_case : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __snake_case : Optional[int] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __snake_case , __snake_case , __snake_case : Union[str, Any] = text_embeddings.shape __snake_case : Optional[int] = text_embeddings.repeat(1 , a_ , 1 ) __snake_case : Dict = text_embeddings.view(bs_embed * num_images_per_prompt , a_ , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
__snake_case : Dict = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __snake_case : List[str] if negative_prompt is None: __snake_case : List[Any] = [''''''] elif type(a_ ) is not type(a_ ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(a_ )} !=""" f""" {type(a_ )}.""" ) elif isinstance(a_ , a_ ): __snake_case : List[str] = [negative_prompt] elif batch_size != len(a_ ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(a_ )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ''' the batch size of `prompt`.''' ) else: __snake_case : Optional[int] = negative_prompt __snake_case : Optional[int] = text_input_ids.shape[-1] __snake_case : List[Any] = self.tokenizer( a_ , padding='''max_length''' , max_length=a_ , truncation=a_ , return_tensors='''pt''' , ) __snake_case : str = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __snake_case : str = uncond_embeddings.shape[1] __snake_case : int = uncond_embeddings.repeat(a_ , a_ , 1 ) __snake_case : int = uncond_embeddings.view(batch_size * num_images_per_prompt , a_ , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __snake_case : Any = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__snake_case : Any = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __snake_case : Dict = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __snake_case : Dict = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __snake_case : Union[str, Any] = torch.randn( a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to(self.device ) __snake_case : Tuple = torch.randn(a_ , generator=a_ , device='''cpu''' , dtype=a_ ).to( self.device ) else: __snake_case : Dict = torch.randn( a_ , generator=a_ , device=self.device , dtype=a_ ) __snake_case : Dict = torch.randn(a_ , generator=a_ , device=self.device , dtype=a_ ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) __snake_case : Union[str, Any] = latents_reference.to(self.device ) __snake_case : Dict = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __snake_case : int = (latents_shape[3] - latents_shape_reference[3]) // 2 __snake_case : Tuple = (latents_shape[2] - latents_shape_reference[2]) // 2 __snake_case : Optional[int] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __snake_case : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __snake_case : int = 0 if dx < 0 else dx __snake_case : Union[str, Any] = 0 if dy < 0 else dy __snake_case : str = max(-dx , 0 ) __snake_case : Tuple = max(-dy , 0 ) # import pdb # pdb.set_trace() __snake_case : Any = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(a_ ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __snake_case : Optional[Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __snake_case : List[Any] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __snake_case : Any = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __snake_case : Tuple = {} if accepts_eta: __snake_case : List[str] = eta for i, t in enumerate(self.progress_bar(a_ ) ): # expand the latents if we are doing classifier free guidance __snake_case : str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __snake_case : Tuple = self.scheduler.scale_model_input(a_ , a_ ) # predict the noise residual __snake_case : int = self.unet(a_ , a_ , encoder_hidden_states=a_ ).sample # perform guidance if do_classifier_free_guidance: __snake_case , __snake_case : Tuple = noise_pred.chunk(2 ) __snake_case : str = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __snake_case : Optional[Any] = self.scheduler.step(a_ , a_ , a_ , **a_ ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(a_ , a_ , a_ ) __snake_case : Union[str, Any] = 1 / 0.1_8215 * latents __snake_case : Optional[Any] = self.vae.decode(a_ ).sample __snake_case : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __snake_case : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __snake_case : Optional[int] = self.feature_extractor(self.numpy_to_pil(a_ ) , return_tensors='''pt''' ).to( self.device ) __snake_case , __snake_case : List[Any] = self.safety_checker( images=a_ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __snake_case : Union[str, Any] = None if output_type == "pil": __snake_case : Union[str, Any] = self.numpy_to_pil(a_ ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=a_ , nsfw_content_detected=a_ )
102
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")
    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
265
0
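The Stable Diffusion pipeline in the `code` field of the record above applies classifier-free guidance at every denoising step. A minimal sketch of just that step, with illustrative tensor shapes:

import torch

def apply_guidance(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # The pipeline runs the UNet on a doubled batch, then splits it back apart.
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    # guidance_scale == 1.0 corresponds to doing no classifier-free guidance.
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

guided = apply_guidance(torch.randn(2, 4, 64, 64), guidance_scale=7.5)
assert guided.shape == (1, 4, 64, 64)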
import math


def is_prime(number: int) -> bool:
    """Checks whether a number is prime, using the 6k +/- 1 optimisation."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(ratio: float = 0.1) -> int:
    """Returns the side length of the square spiral at which the ratio of primes
    along both diagonals first falls below `ratio`."""
    j = 3
    primes = 3

    while primes / (2 * j - 1) >= ratio:
        for i in range(j * j + j + 1, (j + 2) * (j + 2), j + 1):
            primes += is_prime(i)
        j += 2
    return j


if __name__ == "__main__":
    import doctest

    doctest.testmod()
103
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
265
0
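The spiral-primes record above iterates over the corner values of each new ring of a square spiral. The helper below is not part of the record; it is a sketch for checking which values those are:

def ring_corners(side: int) -> list[int]:
    """Corner values of the square-spiral ring whose side length is `side` (odd, >= 3)."""
    largest = side * side
    return [largest - k * (side - 1) for k in range(3, -1, -1)]

assert ring_corners(3) == [3, 5, 7, 9]
assert ring_corners(5) == [13, 17, 21, 25]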
"""simple docstring"""
from itertools import zip_longest

import requests
from bs4 import BeautifulSoup
from pandas import DataFrame


def get_amazon_product_data(product: str = "laptop") -> DataFrame:
    """Scrape Amazon.in search results for a product into a Pandas dataframe."""
    url = f"https://www.amazon.in/laptop/s?k={product}"
    header = {
        "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36",
        "Accept-Language": "en-US, en;q=0.5",
    }
    soup = BeautifulSoup(requests.get(url, headers=header).text)
    # Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
        columns=[
            "Product Title",
            "Product Link",
            "Current Price of the product",
            "Product Rating",
            "MRP of the product",
            "Discount",
        ]
    )
    # Loop through each entry and store them in the dataframe
    for item, _ in zip_longest(
        soup.find_all(
            "div",
            attrs={"class": "s-result-item", "data-component-type": "s-search-result"},
        ),
        soup.find_all("div", attrs={"class": "a-row a-size-base a-color-base"}),
    ):
        try:
            product_title = item.h2.text
            product_link = "https://www.amazon.in/" + item.h2.a["href"]
            product_price = item.find("span", attrs={"class": "a-offscreen"}).text
            try:
                product_rating = item.find("span", attrs={"class": "a-icon-alt"}).text
            except AttributeError:
                product_rating = "Not available"
            try:
                product_mrp = (
                    "₹"
                    + item.find(
                        "span", attrs={"class": "a-price a-text-price"}
                    ).text.split("₹")[1]
                )
            except AttributeError:
                product_mrp = ""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("₹").replace(",", ""))
                            - float(product_price.strip("₹").replace(",", ""))
                        )
                        / float(product_mrp.strip("₹").replace(",", ""))
                    )
                    * 100
                )
            except ValueError:
                discount = float("nan")
        except AttributeError:
            pass
        data_frame.loc[len(data_frame.index)] = [
            product_title,
            product_link,
            product_price,
            product_rating,
            product_mrp,
            discount,
        ]
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "MRP of the product",
        ] = " "
        data_frame.loc[
            data_frame["Current Price of the product"] > data_frame["MRP of the product"],
            "Discount",
        ] = " "
        data_frame.index += 1
    return data_frame


if __name__ == "__main__":
    product = "headphones"
    get_amazon_product_data(product).to_csv(f"Amazon Product Data for {product}.csv")
104
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any: UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : List[Any] = projection_dim UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Any = dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[Any] = scope UpperCAmelCase : Union[str, Any] = bos_token_id def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Tuple = input_mask.numpy() UpperCAmelCase , UpperCAmelCase : int = input_mask.shape UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A ): UpperCAmelCase : Tuple = 1 UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : int = self.get_config() return config, input_ids, tf.convert_to_tensor(A ) def _lowercase( self ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : int = TFBlipTextModel(config=A ) UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A ) UpperCAmelCase : int = model(A , training=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Dict = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = (TFBlipTextModel,) if is_tf_available() else () 
lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> int: UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def _lowercase( self ) -> Union[str, Any]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Dict: pass @slow def _lowercase( self ) -> Dict: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A ) self.assertIsNotNone(A ) def _lowercase( self , A=True ) -> str: super().test_pt_tf_model_equivalence(allow_missing_keys=A )
265
0
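The discount arithmetic buried in the scraper record above is easier to verify in isolation. A sketch; the helper name and the sample prices are invented:

def discount_percent(mrp: str, price: str) -> float:
    """Percentage discount computed from Amazon-style price strings such as '₹1,299'."""
    mrp_value = float(mrp.strip("₹").replace(",", ""))
    price_value = float(price.strip("₹").replace(",", ""))
    return (mrp_value - price_value) / mrp_value * 100

assert discount_percent("₹1,000", "₹750") == 25.0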
"""simple docstring""" import warnings from transformers import AutoTokenizer from transformers.utils import is_torch_available from transformers.utils.generic import ExplicitEnum from ...processing_utils import ProcessorMixin if is_torch_available(): import torch class __UpperCamelCase ( a__ ): lowerCamelCase : List[str] ="""char""" lowerCamelCase : Optional[Any] ="""bpe""" lowerCamelCase : int ="""wp""" a : Optional[int] = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE) class __UpperCamelCase ( a__ ): lowerCamelCase : Any =["""image_processor""", """char_tokenizer"""] lowerCamelCase : int ="""ViTImageProcessor""" lowerCamelCase : Any ="""MgpstrTokenizer""" def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[Any]: a : Dict = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , lowerCAmelCase__ , ) a : Optional[int] = kwargs.pop("feature_extractor" ) a : Optional[Any] = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." ) a : Tuple = tokenizer a : int = AutoTokenizer.from_pretrained("gpt2" ) a : Tuple = AutoTokenizer.from_pretrained("bert-base-uncased" ) super().__init__(lowerCAmelCase__ , lowerCAmelCase__ ) def __call__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> Optional[int]: if images is None and text is None: raise ValueError("You need to specify either an `images` or `text` input to process." ) if images is not None: a : Dict = self.image_processor(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) if text is not None: a : Tuple = self.char_tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ ) if text is None: return inputs elif images is None: return encodings else: a : Tuple = encodings["input_ids"] return inputs def __a ( self , lowerCAmelCase__ ) -> Dict: a, a, a : str = sequences a : int = char_preds.size(0 ) a, a : Union[str, Any] = self._decode_helper(lowerCAmelCase__ , "char" ) a, a : Dict = self._decode_helper(lowerCAmelCase__ , "bpe" ) a, a : Any = self._decode_helper(lowerCAmelCase__ , "wp" ) a : Dict = [] a : int = [] for i in range(lowerCAmelCase__ ): a : Union[str, Any] = [char_scores[i], bpe_scores[i], wp_scores[i]] a : Optional[int] = [char_strs[i], bpe_strs[i], wp_strs[i]] a : str = scores.index(max(lowerCAmelCase__ ) ) final_strs.append(strs[max_score_index] ) final_scores.append(scores[max_score_index] ) a : List[str] = {} a : int = final_strs a : List[str] = final_scores a : List[Any] = char_strs a : Tuple = bpe_strs a : Any = wp_strs return out def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> int: if format == DecodeType.CHARACTER: a : Optional[int] = self.char_decode a : Optional[int] = 1 a : Optional[int] = "[s]" elif format == DecodeType.BPE: a : Any = self.bpe_decode a : Union[str, Any] = 2 a : str = "#" elif format == DecodeType.WORDPIECE: a : Dict = self.wp_decode a : str = 102 a : Optional[Any] = "[SEP]" else: raise ValueError(f"""Format {format} is not supported.""" ) a, a : List[Any] = [], [] a : Optional[int] = pred_logits.size(0 ) a : List[Any] = pred_logits.size(1 ) a, a : List[Any] = pred_logits.topk(1 , dim=-1 , largest=lowerCAmelCase__ , sorted=lowerCAmelCase__ ) a : 
List[str] = preds_index.view(-1 , lowerCAmelCase__ )[:, 1:] a : Dict = decoder(lowerCAmelCase__ ) a, a : Dict = torch.nn.functional.softmax(lowerCAmelCase__ , dim=2 ).max(dim=2 ) a : List[Any] = preds_max_prob[:, 1:] for index in range(lowerCAmelCase__ ): a : Optional[int] = preds_str[index].find(lowerCAmelCase__ ) a : List[Any] = preds_str[index][:pred_eos] a : Optional[int] = preds_index[index].cpu().tolist() a : Any = pred_index.index(lowerCAmelCase__ ) if eos_token in pred_index else -1 a : Dict = preds_max_prob[index][: pred_eos_index + 1] a : List[str] = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0 dec_strs.append(lowerCAmelCase__ ) conf_scores.append(lowerCAmelCase__ ) return dec_strs, conf_scores def __a ( self , lowerCAmelCase__ ) -> str: a : List[Any] = [seq.replace(" " , "" ) for seq in self.char_tokenizer.batch_decode(lowerCAmelCase__ )] return decode_strs def __a ( self , lowerCAmelCase__ ) -> str: return self.bpe_tokenizer.batch_decode(lowerCAmelCase__ ) def __a ( self , lowerCAmelCase__ ) -> Dict: a : Optional[int] = [seq.replace(" " , "" ) for seq in self.wp_tokenizer.batch_decode(lowerCAmelCase__ )] return decode_strs
105
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification a : str = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co a : int = """main""" # Default branch name a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) a : str = """aaaaaaa""" # This commit does not exist, so we should 404. a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def __lowerCamelCase ( ) -> List[str]: print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Optional[int]: print("""Bonjour!""" ) yield print("""Au revoir!""" ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> List[Any]: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class UpperCamelCase_ ( unittest.TestCase ): @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Tuple: with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Dict: with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Union[str, Any]: with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def _lowercase( self ) -> Optional[int]: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_tf def _lowercase( self ) -> int: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , 
["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_flax def _lowercase( self ) -> Any: # Flax models don't have labels self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , [] )
265
0
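The MGP-STR processor record above decodes every image with three tokenizers (character, BPE, WordPiece) and keeps whichever string scored highest. The selection logic, reduced to a sketch with invented scores:

def pick_best_decode(strings: list[str], scores: list[float]) -> tuple[str, float]:
    # Mirrors the processor's max-score selection across the three decode heads.
    max_score_index = scores.index(max(scores))
    return strings[max_score_index], scores[max_score_index]

assert pick_best_decode(["ticket", "ticke#", "tick"], [0.92, 0.81, 0.77]) == ("ticket", 0.92)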
"""simple docstring""" from __future__ import annotations import typing from collections.abc import Iterable import numpy as np __UpperCamelCase : Optional[int] = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007 __UpperCamelCase : Any = typing.Union[np.floataa, int, float] # noqa: UP007 def __SCREAMING_SNAKE_CASE ( A_ , A_ ): return np.sqrt(np.sum((np.asarray(A_ ) - np.asarray(A_ )) ** 2 ) ) def __SCREAMING_SNAKE_CASE ( A_ , A_ ): return sum((va - va) ** 2 for va, va in zip(A_ , A_ ) ) ** (1 / 2) if __name__ == "__main__": def __SCREAMING_SNAKE_CASE ( ): from timeit import timeit print('''Without Numpy''' ) print( timeit( '''euclidean_distance_no_np([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) ) print('''With Numpy''' ) print( timeit( '''euclidean_distance([1, 2, 3], [4, 5, 6])''' , number=1_00_00 , globals=globals() , ) ) benchmark()
106
"""simple docstring"""
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Returns the least row length for which the number of fillings
    first exceeds one million."""
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
265
0
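As a quick sanity check on the pair of distance implementations above, the numpy and pure-Python versions should agree to floating-point precision; a minimal sketch, assuming the euclidean_distance / euclidean_distance_no_np names from the reconstruction above:

import numpy as np

rng = np.random.default_rng(0)
for _ in range(100):
    a = rng.normal(size=8)
    b = rng.normal(size=8)
    # Both implementations compute sqrt(sum((a_i - b_i)^2)).
    assert abs(euclidean_distance(a, b) - euclidean_distance_no_np(a, b)) < 1e-9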
import random import unittest import numpy as np import transformers from transformers import is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax if is_flax_available(): import os import jax.numpy as jnp from jax import jit from transformers import AutoTokenizer, FlaxAutoModelForCausalLM from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model __lowerCAmelCase : List[Any] = '0.12' # assumed parallelism: 8 if is_torch_available(): import torch def __magic_name__ ( A : Dict, A : Union[str, Any], A : Optional[int]=None ): '''simple docstring''' if rng is None: a = random.Random() a = 1 for dim in shape: total_dims *= dim a = [] for _ in range(A ): values.append(rng.randint(0, vocab_size - 1 ) ) a = np.array(A, dtype=jnp.intaa ).reshape(A ) return output def __magic_name__ ( A : Dict, A : Union[str, Any]=None ): '''simple docstring''' a = ids_tensor(A, vocab_size=2, rng=A ) # make sure that at least one token is attended to for each batch a = 1 return attn_mask @require_flax class snake_case__ : """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = None SCREAMING_SNAKE_CASE_ : Any = () def __UpperCAmelCase ( self : int ) -> List[str]: a , a = self.model_tester.prepare_config_and_inputs_for_common() # cut to half length & take max batch_size 3 a = 2 a = inputs["input_ids"].shape[-1] // 2 a = inputs["input_ids"][:max_batch_size, :sequence_length] a = jnp.ones_like(__lowerCamelCase ) a = attention_mask[:max_batch_size, :sequence_length] # generate max 5 tokens a = input_ids.shape[-1] + 5 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` a = config.eos_token_id return config, input_ids, attention_mask, max_length @is_pt_flax_cross_test def __UpperCAmelCase ( self : Optional[Any] ) -> int: a , a , a , a = self._get_input_ids_and_config() a = False a = max_length a = 0 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model_class.__name__[4:] # Skip the "Flax" at the beginning a = getattr(__lowerCamelCase , __lowerCamelCase ) a = pt_model_class(__lowerCamelCase ).eval() a = load_flax_weights_in_pytorch_model(__lowerCamelCase , flax_model.params ) a = flax_model.generate(__lowerCamelCase ).sequences a = pt_model.generate(torch.tensor(__lowerCamelCase , dtype=torch.long ) ) if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]: a = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]] self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() ) def __UpperCAmelCase ( self : List[str] ) -> Optional[int]: a , a , a , a = self._get_input_ids_and_config() a = False a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Optional[int] ) -> Any: a , a , a , a = self._get_input_ids_and_config() a = True a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase 
).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : int ) -> Dict: a , a , a , a = self._get_input_ids_and_config() a = False a = max_length a = 2 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Any ) -> Union[str, Any]: a , a , a , a = self._get_input_ids_and_config() a = False a = max_length a = 2 a = 2 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences ) def __UpperCAmelCase ( self : Optional[Any] ) -> Dict: a , a , a , a = self._get_input_ids_and_config() a = True a = max_length a = 0.8 a = 10 a = 0.3 a = 1 a = 8 a = 9 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: a , a , a , a = self._get_input_ids_and_config() a = max_length a = 1 a = 8 a = 9 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : List[str] ) -> Union[str, Any]: a , a , a , a = self._get_input_ids_and_config() a = max_length a = 2 a = 1 a = 8 a = 9 for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Union[str, Any] ) -> Dict: a , a , a , a = self._get_input_ids_and_config() # pad attention mask on the left a = attention_mask.at[(0, 0)].set(0 ) a = False a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Tuple ) -> Tuple: a , a , a , a = self._get_input_ids_and_config() # pad attention mask on the left a = attention_mask.at[(0, 0)].set(0 ) a = True a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , 
__lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) def __UpperCAmelCase ( self : Optional[int] ) -> List[Any]: a , a , a , a = self._get_input_ids_and_config() # pad attention mask on the left a = attention_mask.at[(0, 0)].set(0 ) a = 2 a = max_length for model_class in self.all_generative_model_classes: a = model_class(__lowerCamelCase ) a = model.generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertEqual(generation_outputs.shape[-1] , __lowerCamelCase ) a = jit(model.generate ) a = jit_generate(__lowerCamelCase , attention_mask=__lowerCamelCase ).sequences self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() ) @require_flax class snake_case__ (unittest.TestCase ): """simple docstring""" def __UpperCAmelCase ( self : Dict ) -> Optional[Any]: a = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-bert" ) a = FlaxAutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) a = "Hello world" a = tokenizer(__lowerCamelCase , return_tensors="np" ).input_ids # typos are quickly detected (the correct argument is `do_sample`) with self.assertRaisesRegex(__lowerCamelCase , "do_samples" ): model.generate(__lowerCamelCase , do_samples=__lowerCamelCase ) # arbitrary arguments that will not be used anywhere are also not accepted with self.assertRaisesRegex(__lowerCamelCase , "foo" ): a = {"foo": "bar"} model.generate(__lowerCamelCase , **__lowerCamelCase )
107
'''simple docstring'''
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to every position in [a, b], using lazy propagation."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b], pushing pending updates down."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
265
0
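Because the lazy segment tree above supports only range-assign updates and range-max queries, it is easy to cross-check against a brute-force list; a minimal sketch, assuming the SegmentTree class from the reconstruction above:

import random

data = [random.randint(-50, 50) for _ in range(15)]
segt = SegmentTree(15)
segt.build(1, 1, 15, data)

for _ in range(100):
    lo = random.randint(1, 15)
    hi = random.randint(lo, 15)
    if random.random() < 0.5:
        val = random.randint(-50, 50)
        segt.update(1, 1, 15, lo, hi, val)  # range assign in the tree...
        data[lo - 1 : hi] = [val] * (hi - lo + 1)  # ...mirrored in the plain list
    else:
        assert segt.query(1, 1, 15, lo, hi) == max(data[lo - 1 : hi])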
"""simple docstring""" import darl # noqa import gym import tqdm from diffusers.experimental import ValueGuidedRLPipeline lowerCAmelCase__ = { '''n_samples''': 64, '''horizon''': 32, '''num_inference_steps''': 20, '''n_guide_steps''': 2, # can set to 0 for faster sampling, does not use value network '''scale_grad_by_std''': True, '''scale''': 0.1, '''eta''': 0.0, '''t_grad_cutoff''': 2, '''device''': '''cpu''', } if __name__ == "__main__": lowerCAmelCase__ = '''hopper-medium-v2''' lowerCAmelCase__ = gym.make(env_name) lowerCAmelCase__ = ValueGuidedRLPipeline.from_pretrained( '''bglick13/hopper-medium-v2-value-function-hor32''', env=env, ) env.seed(0) lowerCAmelCase__ = env.reset() lowerCAmelCase__ = 0 lowerCAmelCase__ = 0 lowerCAmelCase__ = 1_000 lowerCAmelCase__ = [obs.copy()] try: for t in tqdm.tqdm(range(T)): # call the policy lowerCAmelCase__ = pipeline(obs, planning_horizon=32) # execute action in environment lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ = env.step(denorm_actions) lowerCAmelCase__ = env.get_normalized_score(total_reward) # update return total_reward += reward total_score += score print( F"Step: {t}, Reward: {reward}, Total Reward: {total_reward}, Score: {score}, Total Score:" F" {total_score}" ) # save observations for rendering rollout.append(next_observation.copy()) lowerCAmelCase__ = next_observation except KeyboardInterrupt: pass print(F"Total reward: {total_reward}")
108
'''simple docstring'''
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        """Fundamental transformation applied to every pixel value."""
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")

    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
265
0
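One caveat with the brightness transform above: PIL coerces the lookup table it builds for point back into the 8-bit band range, but a pure-Python port of the same formula would not, so an explicit clamp is the safer translation (a hypothetical helper for illustration, not part of the original file):

def brightness_clamped(c: int, level: float) -> int:
    # The formula 128 + level + (c - 128) simplifies to c + level;
    # clamp the result into the valid 8-bit range.
    return max(0, min(255, int(c + level)))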
"""simple docstring""" from collections.abc import Sequence def _snake_case ( UpperCamelCase : Sequence[int] | None = None ): if nums is None or not nums: raise ValueError("""Input sequence should not be empty""" ) UpperCAmelCase : Optional[Any] = nums[0] for i in range(1 , len(UpperCamelCase ) ): UpperCAmelCase : Tuple = nums[i] UpperCAmelCase : Union[str, Any] = max(UpperCamelCase , ans + num , UpperCamelCase ) return ans if __name__ == "__main__": import doctest doctest.testmod() # Try on a sample input from the user A: int = int(input("Enter number of elements : ").strip()) A: int = list(map(int, input("\nEnter the numbers : ").strip().split()))[:n] print(max_subsequence_sum(array))
109
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
265
0
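The GPTNeoX model tests above lean on small fixture helpers such as ids_tensor and random_attention_mask; a minimal sketch of equivalent helpers (hypothetical re-implementations for illustration, not the transformers test internals):

import torch


def ids_tensor(shape, vocab_size):
    # Random token ids in [0, vocab_size) to stand in for input_ids.
    return torch.randint(0, vocab_size, shape, dtype=torch.long)


def random_attention_mask(shape):
    # Random 0/1 mask over a [batch, seq] shape; force the last position
    # to 1 so that every row attends to at least one token.
    mask = torch.randint(0, 2, shape, dtype=torch.long)
    mask[:, -1] = 1
    return mask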
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE : List[Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : Optional[Any] = {"""vocab_file""": """sentencepiece.bpe.model"""} __SCREAMING_SNAKE_CASE : Any = { """vocab_file""": { """camembert-base""": """https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model""", } } __SCREAMING_SNAKE_CASE : int = { """camembert-base""": 512, } __SCREAMING_SNAKE_CASE : List[Any] = """▁""" class __A (snake_case__): '''simple docstring''' __lowercase: int = VOCAB_FILES_NAMES __lowercase: List[str] = PRETRAINED_VOCAB_FILES_MAP __lowercase: int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __lowercase: List[str] = ["""input_ids""", """attention_mask"""] def __init__( self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : List[Any]="</s>" , UpperCAmelCase_ : int="</s>" , UpperCAmelCase_ : List[Any]="<s>" , UpperCAmelCase_ : Union[str, Any]="<unk>" , UpperCAmelCase_ : Tuple="<pad>" , UpperCAmelCase_ : Any="<mask>" , UpperCAmelCase_ : Any=["<s>NOTUSED", "</s>NOTUSED"] , UpperCAmelCase_ : Optional[int] = None , **UpperCAmelCase_ : List[str] , ) ->None: """simple docstring""" snake_case_ = AddedToken(UpperCAmelCase_ , lstrip=UpperCAmelCase_ , rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else mask_token snake_case_ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCAmelCase_ , eos_token=UpperCAmelCase_ , unk_token=UpperCAmelCase_ , sep_token=UpperCAmelCase_ , cls_token=UpperCAmelCase_ , pad_token=UpperCAmelCase_ , mask_token=UpperCAmelCase_ , additional_special_tokens=UpperCAmelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase_ , ) snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCAmelCase_ ) ) snake_case_ = vocab_file # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual # sentencepiece vocabulary (this is the case for <s> and </s> snake_case_ = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3} snake_case_ = len(self.fairseq_tokens_to_ids ) snake_case_ = len(self.sp_model ) + len(self.fairseq_tokens_to_ids ) snake_case_ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int = None ) ->List[int]: """simple docstring""" if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] snake_case_ = [self.cls_token_id] snake_case_ = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Dict = False ) ->List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCAmelCase_ , token_ids_a=UpperCAmelCase_ , already_has_special_tokens=UpperCAmelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCAmelCase_ )) + [1] return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1] def lowerCAmelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Dict = None ) ->List[int]: """simple docstring""" snake_case_ = 
[self.sep_token_id] snake_case_ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCAmelCase ( self : Dict ) ->List[Any]: """simple docstring""" return len(self.fairseq_tokens_to_ids ) + len(self.sp_model ) def lowerCAmelCase ( self : Tuple ) ->Optional[Any]: """simple docstring""" snake_case_ = {self.convert_ids_to_tokens(UpperCAmelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : int ) ->List[str]: """simple docstring""" return self.sp_model.encode(UpperCAmelCase_ , out_type=UpperCAmelCase_ ) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[Any] ) ->Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] elif self.sp_model.PieceToId(UpperCAmelCase_ ) == 0: # Convert sentence piece unk token to fairseq unk token index return self.unk_token_id return self.fairseq_offset + self.sp_model.PieceToId(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Tuple ) ->Tuple: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : int ) ->Optional[Any]: """simple docstring""" snake_case_ = [] snake_case_ = """""" snake_case_ = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCAmelCase_ ) + token snake_case_ = True snake_case_ = [] else: current_sub_tokens.append(UpperCAmelCase_ ) snake_case_ = False out_string += self.sp_model.decode(UpperCAmelCase_ ) return out_string.strip() def __getstate__( self : List[Any] ) ->Optional[int]: """simple docstring""" snake_case_ = self.__dict__.copy() snake_case_ = None return state def __setstate__( self : List[str] , UpperCAmelCase_ : List[str] ) ->Any: """simple docstring""" snake_case_ = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): snake_case_ = {} snake_case_ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : Any , UpperCAmelCase_ : Dict = None ) ->Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCAmelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return snake_case_ = os.path.join( UpperCAmelCase_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCAmelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCAmelCase_ , """wb""" ) as fi: snake_case_ = self.sp_model.serialized_model_proto() fi.write(UpperCAmelCase_ ) return (out_vocab_file,)
347
'''simple docstring'''


def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation; assumes an integer exponent."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Handle negative exponents by inverting the positive-exponent result."""
    if b < 0:
        # int(b / 2) truncates toward zero, so the recursion still terminates
        # for negative b and computes a**abs(b).
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
265
0
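Note that actual_power above recomputes actual_power(a, b // 2) twice in every branch, so it performs O(b) multiplications instead of the intended O(log b); binding the half-power once restores the divide-and-conquer speedup (a minimal sketch under the same non-negative integer exponent assumption):

def fast_power(a: int, b: int) -> int:
    # O(log b): compute the half-power once and square it.
    if b == 0:
        return 1
    half = fast_power(a, b // 2)
    return half * half if b % 2 == 0 else a * half * half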
import argparse import collections import numpy as np import torch from flax import traverse_util from tax import checkpoints from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Union[str, Any]) -> Dict: '''simple docstring''' return params[F'{prefix}/{prefix}/relpos_bias/rel_embedding'][:, i, :] def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : int="attention") -> Any: '''simple docstring''' __UpperCamelCase : List[str] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/key/kernel'][:, i, :, :]) __UpperCamelCase : Optional[Any] = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2]) __UpperCamelCase : str = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/out/kernel'][:, i, :, :]) __UpperCamelCase : int = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2]) __UpperCamelCase : Optional[int] = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/query/kernel'][:, i, :, :]) __UpperCamelCase : Any = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2]) __UpperCamelCase : Dict = np.ascontiguousarray(params[F'{prefix}/{prefix}/{layer_name}/value/kernel'][:, i, :, :]) __UpperCamelCase : str = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2]) return k, o, q, v def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Dict , _lowerCamelCase : int=False) -> Dict: '''simple docstring''' if split_mlp_wi: __UpperCamelCase : List[Any] = params[F'{prefix}/{prefix}/mlp/wi_0/kernel'][:, i, :] __UpperCamelCase : List[Any] = params[F'{prefix}/{prefix}/mlp/wi_1/kernel'][:, i, :] __UpperCamelCase : List[str] = (wi_a, wi_a) else: __UpperCamelCase : List[str] = params[F'{prefix}/{prefix}/mlp/wi/kernel'][:, i, :] __UpperCamelCase : Tuple = params[F'{prefix}/{prefix}/mlp/wo/kernel'][:, i, :] return wi, wo def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : List[str]) -> int: '''simple docstring''' return params[F'{prefix}/{prefix}/{layer_name}/scale'][:, i] def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , *, _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Dict = False) -> List[str]: '''simple docstring''' __UpperCamelCase : str = traverse_util.flatten_dict(variables["target"]) __UpperCamelCase : Dict = {"""/""".join(_lowercase): v for k, v in old.items()} # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi __UpperCamelCase : Any = """encoder/encoder/mlp/wi_0/kernel""" in old print("Split MLP:" , _lowercase) __UpperCamelCase : int = collections.OrderedDict() # Shared embeddings. __UpperCamelCase : Any = old["""token_embedder/embedding"""] # Encoder. for i in range(_lowercase): # Block i, layer 0 (Self Attention). __UpperCamelCase : str = tax_layer_norm_lookup(_lowercase , _lowercase , "encoder" , "pre_attention_layer_norm") __UpperCamelCase : Tuple = tax_attention_lookup(_lowercase , _lowercase , "encoder" , "attention") __UpperCamelCase : Optional[Any] = layer_norm __UpperCamelCase : Union[str, Any] = k.T __UpperCamelCase : Union[str, Any] = o.T __UpperCamelCase : Tuple = q.T __UpperCamelCase : str = v.T # Block i, layer 1 (MLP). 
__UpperCamelCase : Optional[int] = tax_layer_norm_lookup(_lowercase , _lowercase , "encoder" , "pre_mlp_layer_norm") __UpperCamelCase : List[Any] = tax_mlp_lookup(_lowercase , _lowercase , "encoder" , _lowercase) __UpperCamelCase : Any = layer_norm if split_mlp_wi: __UpperCamelCase : Tuple = wi[0].T __UpperCamelCase : List[Any] = wi[1].T else: __UpperCamelCase : List[Any] = wi.T __UpperCamelCase : Tuple = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase : Union[str, Any] = tax_relpos_bias_lookup( _lowercase , _lowercase , "encoder").T __UpperCamelCase : List[Any] = old["""encoder/encoder_norm/scale"""] if not scalable_attention: __UpperCamelCase : Optional[int] = tax_relpos_bias_lookup( _lowercase , 0 , "encoder").T __UpperCamelCase : List[Any] = tax_relpos_bias_lookup( _lowercase , 0 , "decoder").T if not is_encoder_only: # Decoder. for i in range(_lowercase): # Block i, layer 0 (Self Attention). __UpperCamelCase : List[Any] = tax_layer_norm_lookup(_lowercase , _lowercase , "decoder" , "pre_self_attention_layer_norm") __UpperCamelCase : Dict = tax_attention_lookup(_lowercase , _lowercase , "decoder" , "self_attention") __UpperCamelCase : List[Any] = layer_norm __UpperCamelCase : str = k.T __UpperCamelCase : Dict = o.T __UpperCamelCase : str = q.T __UpperCamelCase : Optional[int] = v.T # Block i, layer 1 (Cross Attention). __UpperCamelCase : Union[str, Any] = tax_layer_norm_lookup(_lowercase , _lowercase , "decoder" , "pre_cross_attention_layer_norm") __UpperCamelCase : Any = tax_attention_lookup(_lowercase , _lowercase , "decoder" , "encoder_decoder_attention") __UpperCamelCase : Optional[int] = layer_norm __UpperCamelCase : Union[str, Any] = k.T __UpperCamelCase : Tuple = o.T __UpperCamelCase : int = q.T __UpperCamelCase : Optional[int] = v.T # Block i, layer 2 (MLP). __UpperCamelCase : Any = tax_layer_norm_lookup(_lowercase , _lowercase , "decoder" , "pre_mlp_layer_norm") __UpperCamelCase : List[str] = tax_mlp_lookup(_lowercase , _lowercase , "decoder" , _lowercase) __UpperCamelCase : List[str] = layer_norm if split_mlp_wi: __UpperCamelCase : int = wi[0].T __UpperCamelCase : Optional[int] = wi[1].T else: __UpperCamelCase : Union[str, Any] = wi.T __UpperCamelCase : Tuple = wo.T if scalable_attention: # convert the rel_embedding of each layer __UpperCamelCase : Tuple = tax_relpos_bias_lookup(_lowercase , _lowercase , "decoder").T __UpperCamelCase : int = old["""decoder/decoder_norm/scale"""] # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead) if "decoder/logits_dense/kernel" in old: __UpperCamelCase : Dict = old["""decoder/logits_dense/kernel"""].T return new def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] , _lowerCamelCase : List[str]) -> Tuple: '''simple docstring''' __UpperCamelCase : Union[str, Any] = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()]) # Add what is missing. if "encoder.embed_tokens.weight" not in state_dict: __UpperCamelCase : Union[str, Any] = state_dict["""shared.weight"""] if not is_encoder_only: if "decoder.embed_tokens.weight" not in state_dict: __UpperCamelCase : Optional[int] = state_dict["""shared.weight"""] if "lm_head.weight" not in state_dict: # For old 1.0 models. 
print("Using shared word embeddings as lm_head.") __UpperCamelCase : Any = state_dict["""shared.weight"""] return state_dict def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Any) -> Any: '''simple docstring''' __UpperCamelCase : Union[str, Any] = checkpoints.load_tax_checkpoint(_lowercase) __UpperCamelCase : Optional[int] = convert_tax_to_pytorch( _lowercase , num_layers=config.num_layers , is_encoder_only=_lowercase , scalable_attention=_lowercase) __UpperCamelCase : str = make_state_dict(_lowercase , _lowercase) model.load_state_dict(_lowercase , strict=_lowercase) def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : str , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple = False , _lowerCamelCase : Tuple = False , ) -> Optional[int]: '''simple docstring''' __UpperCamelCase : Optional[Any] = MTaConfig.from_json_file(_lowercase) print(F'Building PyTorch model from configuration: {config}') # Non-v1.1 checkpoints could also use T5Model, but this works for all. # The v1.0 checkpoints will simply have an LM head that is the word embeddings. if is_encoder_only: __UpperCamelCase : Optional[int] = UMTaEncoderModel(_lowercase) else: __UpperCamelCase : Dict = UMTaForConditionalGeneration(_lowercase) # Load weights from tf checkpoint load_tax_weights_in_ta(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase) # Save pytorch-model print(F'Save PyTorch model to {pytorch_dump_path}') model.save_pretrained(_lowercase) # Verify that we can load the checkpoint. model.from_pretrained(_lowercase) print("Done") if __name__ == "__main__": lowercase : Optional[int] = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.') # Required parameters parser.add_argument( '--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.', ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) parser.add_argument( '--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False ) parser.add_argument( '--scalable_attention', action='store_true', help='Whether the model uses scaled attention (umt5 model)', default=False, ) lowercase : List[str] = parser.parse_args() convert_tax_checkpoint_to_pytorch( args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only, args.scalable_attention, )
232
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
265
0
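The tokenizer tests above assert the classic single- and pair-sequence layouts ([CLS] A [SEP] and [CLS] A [SEP] B [SEP]); the construction itself reduces to a few list concatenations (a minimal sketch of the pattern with hypothetical default ids, not the AlbertTokenizer internals):

def build_inputs_with_special_tokens(ids_a, ids_b=None, cls_id=2, sep_id=3):
    # Single sequence: [CLS] A [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]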
import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _lowercase: Optional[int] = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _lowercase ( datasets.BuilderConfig ): """simple docstring""" __A = None def a( A : Dict , A : Any , ) -> List[str]: """simple docstring""" import pyspark def generate_fn(): a = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) ) for partition_id in partition_order: a = df_with_partition_id.select("*" ).where(f'''part_id = {partition_id}''' ).drop("part_id" ) a = partition_df.collect() a = 0 for row in rows: yield f'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class _lowercase ( _BaseExamplesIterable ): """simple docstring""" def __init__(self , lowerCamelCase_ , lowerCamelCase_=None , ): """simple docstring""" a = df a = partition_order or range(self.df.rdd.getNumPartitions() ) a = _generate_iterable_examples(self.df , self.partition_order ) def __iter__(self ): """simple docstring""" yield from self.generate_examples_fn() def UpperCamelCase_ (self , lowerCamelCase_ ): """simple docstring""" a = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(lowerCamelCase_ ) return SparkExamplesIterable(self.df , partition_order=lowerCamelCase_ ) def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" a = self.split_shard_indices_by_worker(lowerCamelCase_ , lowerCamelCase_ ) return SparkExamplesIterable(self.df , partition_order=lowerCamelCase_ ) @property def UpperCamelCase_ (self ): """simple docstring""" return len(self.partition_order ) class _lowercase ( datasets.DatasetBuilder ): """simple docstring""" __A = SparkConfig def __init__(self , lowerCamelCase_ , lowerCamelCase_ = None , lowerCamelCase_ = None , **lowerCamelCase_ , ): """simple docstring""" import pyspark a = pyspark.sql.SparkSession.builder.getOrCreate() a = df a = working_dir super().__init__( cache_dir=lowerCamelCase_ , config_name=str(self.df.semanticHash() ) , **lowerCamelCase_ , ) def UpperCamelCase_ (self ): """simple docstring""" def create_cache_and_write_probe(lowerCamelCase_ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=lowerCamelCase_ ) a = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(lowerCamelCase_ , "a" ) return [probe_file] if self._spark.conf.get("spark.master" , "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
        if self._cache_dir:
            probe = (
                self._spark.sparkContext.parallelize(range(1), 1)
                .mapPartitions(create_cache_and_write_probe)
                .collect()
            )
            if os.path.isfile(probe[0]):
                return
        raise ValueError(
            "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir"
        )

    def _info(self):
        """simple docstring"""
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """simple docstring"""
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN)]

    def _repartition_df_if_needed(self, max_shard_size):
        """simple docstring"""
        import pyspark

        def get_arrow_batch_size(it):
            for batch in it:
                yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]})

        df_num_rows = self.df.count()
        sample_num_rows = df_num_rows if df_num_rows <= 100 else 100
        # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample.
        approx_bytes_per_row = (
            self.df.limit(sample_num_rows)
            .repartition(1)
            .mapInArrow(get_arrow_batch_size, "batch_bytes: long")
            .agg(pyspark.sql.functions.sum("batch_bytes").alias("sample_bytes"))
            .collect()[0]
            .sample_bytes
            / sample_num_rows
        )
        approx_total_size = approx_bytes_per_row * df_num_rows
        if approx_total_size > max_shard_size:
            # Make sure there is at least one row per partition.
            new_num_partitions = min(df_num_rows, int(approx_total_size / max_shard_size))
            self.df = self.df.repartition(new_num_partitions)

    def _prepare_split_single(self, fpath, file_format, max_shard_size):
        """simple docstring"""
        import pyspark

        writer_class = ParquetWriter if file_format == "parquet" else ArrowWriter
        working_fpath = os.path.join(self._working_dir, os.path.basename(fpath)) if self._working_dir else fpath
        embed_local_files = file_format == "parquet"

        # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to
        # pickling the SparkContext.
        features = self.config.features
        writer_batch_size = self._writer_batch_size
        storage_options = self._fs.storage_options

        def write_arrow(it):
            # Within the same SparkContext, no two task attempts will share the same attempt ID.
            task_id = pyspark.TaskContext().taskAttemptId()
            first_batch = next(it, None)
            if first_batch is None:
                # Some partitions might not receive any data.
                return pa.RecordBatch.from_arrays(
                    [[task_id], [0], [0]],
                    names=["task_id", "num_examples", "num_bytes"],
                )
            shard_id = 0
            writer = writer_class(
                features=features,
                path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                writer_batch_size=writer_batch_size,
                storage_options=storage_options,
                embed_local_files=embed_local_files,
            )
            table = pa.Table.from_batches([first_batch])
            writer.write_table(table)
            for batch in it:
                if max_shard_size is not None and writer._num_bytes >= max_shard_size:
                    num_examples, num_bytes = writer.finalize()
                    writer.close()
                    yield pa.RecordBatch.from_arrays(
                        [[task_id], [num_examples], [num_bytes]],
                        names=["task_id", "num_examples", "num_bytes"],
                    )
                    shard_id += 1
                    writer = writer_class(
                        features=writer._features,
                        path=working_fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                        writer_batch_size=writer_batch_size,
                        storage_options=storage_options,
                        embed_local_files=embed_local_files,
                    )
                table = pa.Table.from_batches([batch])
                writer.write_table(table)
            if writer._num_bytes > 0:
                num_examples, num_bytes = writer.finalize()
                writer.close()
                yield pa.RecordBatch.from_arrays(
                    [[task_id], [num_examples], [num_bytes]],
                    names=["task_id", "num_examples", "num_bytes"],
                )

            if working_fpath != fpath:
                for file in os.listdir(os.path.dirname(working_fpath)):
                    dest = os.path.join(os.path.dirname(fpath), os.path.basename(file))
                    shutil.move(file, dest)

        stats = (
            self.df.mapInArrow(write_arrow, "task_id: long, num_examples: long, num_bytes: long")
            .groupBy("task_id")
            .agg(
                pyspark.sql.functions.sum("num_examples").alias("total_num_examples"),
                pyspark.sql.functions.sum("num_bytes").alias("total_num_bytes"),
                pyspark.sql.functions.count("num_bytes").alias("num_shards"),
                pyspark.sql.functions.collect_list("num_examples").alias("shard_lengths"),
            )
            .collect()
        )
        for row in stats:
            yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths)

    def _prepare_split(self, split_generator, file_format="arrow", max_shard_size=None, num_proc=None, **kwargs):
        """simple docstring"""
        self._validate_cache_dir()

        max_shard_size = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE)
        self._repartition_df_if_needed(max_shard_size)

        is_local = not is_remote_filesystem(self._fs)
        path_join = os.path.join if is_local else posixpath.join

        SUFFIX = "-TTTTT-SSSSS-of-NNNNN"
        fname = f"{self.name}-{split_generator.name}{SUFFIX}.{file_format}"
        fpath = path_join(self._output_dir, fname)

        total_num_examples = 0
        total_num_bytes = 0
        total_shards = 0
        task_id_and_num_shards = []
        all_shard_lengths = []

        for task_id, content in self._prepare_split_single(fpath, file_format, max_shard_size):
            (num_examples, num_bytes, num_shards, shard_lengths) = content
            if num_bytes > 0:
                total_num_examples += num_examples
                total_num_bytes += num_bytes
                total_shards += num_shards
                task_id_and_num_shards.append((task_id, num_shards))
                all_shard_lengths.extend(shard_lengths)

        split_generator.split_info.num_examples = total_num_examples
        split_generator.split_info.num_bytes = total_num_bytes

        # should rename everything at the end
        logger.debug(f"Renaming {total_shards} shards.")
        if total_shards > 1:
            split_generator.split_info.shard_lengths = all_shard_lengths

            # Define fs outside of _rename_shard so that we don't reference self in the function, which will
            # result in a pickling error due to pickling the SparkContext.
            fs = self._fs

            # use the -SSSSS-of-NNNNN pattern
            def _rename_shard(task_id, shard_id, global_shard_id):
                rename(
                    fs,
                    fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                    fpath.replace("TTTTT-SSSSS", f"{global_shard_id:05d}").replace("NNNNN", f"{total_shards:05d}"),
                )

            args = []
            global_shard_id = 0
            for i in range(len(task_id_and_num_shards)):
                task_id, num_shards = task_id_and_num_shards[i]
                for shard_id in range(num_shards):
                    args.append([task_id, shard_id, global_shard_id])
                    global_shard_id += 1
            self._spark.sparkContext.parallelize(args, len(args)).map(lambda args: _rename_shard(*args)).collect()
        else:
            # don't use any pattern
            shard_id = 0
            task_id = task_id_and_num_shards[0][0]
            self._rename(
                fpath.replace("SSSSS", f"{shard_id:05d}").replace("TTTTT", f"{task_id:05d}"),
                fpath.replace(SUFFIX, ""),
            )

    def _get_examples_iterable_for_split(self, split_generator):
        """simple docstring"""
        return SparkExamplesIterable(self.df)
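# A minimal usage sketch for the builder above: `Dataset.from_spark` is the public entry point that
# drives these methods. The DataFrame contents here are illustrative assumptions, not part of the file.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label bigint")
# On a multi-node cluster, pass a cache_dir on a shared filesystem visible to the driver and all
# workers -- exactly the condition _validate_cache_dir probes for above.
ds = Dataset.from_spark(df)
print(ds[0])  # {'text': 'hello', 'label': 0}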
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_jukebox": [
        "JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "JukeboxConfig",
        "JukeboxPriorConfig",
        "JukeboxVQVAEConfig",
    ],
    "tokenization_jukebox": ["JukeboxTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_jukebox"] = [
        "JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
        "JukeboxModel",
        "JukeboxPreTrainedModel",
        "JukeboxVQVAE",
        "JukeboxPrior",
    ]

if TYPE_CHECKING:
    from .configuration_jukebox import (
        JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
        JukeboxConfig,
        JukeboxPriorConfig,
        JukeboxVQVAEConfig,
    )
    from .tokenization_jukebox import JukeboxTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_jukebox import (
            JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            JukeboxModel,
            JukeboxPreTrainedModel,
            JukeboxPrior,
            JukeboxVQVAE,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
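# A short sketch of what the _LazyModule indirection above buys (assuming the standard transformers
# package layout is installed): a submodule is imported only when one of its attributes is first
# accessed, so importing the package stays cheap even though torch-heavy modeling code is listed.
import importlib

jukebox = importlib.import_module("transformers.models.jukebox")
config_cls = jukebox.JukeboxConfig  # this attribute access triggers the import of configuration_jukebox
print(config_cls.__name__)  # JukeboxConfig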
'''simple docstring'''
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        if ".fc2." in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if ".fc1." in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name=WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)

    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
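# A tiny, hedged demonstration of the key mapping rename_fairseq_keys (defined above) performs; the
# state-dict keys below are made up for illustration -- only the renaming pattern matters.
import torch

_demo_state = {
    "layers.0.moe_layer.experts.0.fc1.weight": torch.zeros(1),
    "layers.0.moe_layer.gate.wg.weight": torch.zeros(1),
    "layers.0.encoder_attn_layer_norm.weight": torch.zeros(1),
}
print(sorted(rename_fairseq_keys(_demo_state, expert_idx=3)))
# ['layers.0.cross_attention_layer_norm.weight',
#  'layers.0.ffn.experts.expert_3.fc1.weight',
#  'layers.0.ffn.router.classifier.weight']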
import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaControlnetImgaImgPipeline, KandinskyVaaPriorEmbaEmbPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ): A_ : Tuple = KandinskyVaaControlnetImgaImgPipeline A_ : int = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] A_ : Optional[Any] = ['image_embeds', 'negative_image_embeds', 'image', 'hint'] A_ : str = [ 'generator', 'height', 'width', 'strength', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] A_ : Optional[int] = False @property def a (self : List[Any] ): """simple docstring""" return 32 @property def a (self : List[str] ): """simple docstring""" return 32 @property def a (self : int ): """simple docstring""" return self.time_input_dim @property def a (self : Union[str, Any] ): """simple docstring""" return self.time_input_dim * 4 @property def a (self : str ): """simple docstring""" return 100 @property def a (self : List[Any] ): """simple docstring""" torch.manual_seed(0 ) __snake_case = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __snake_case = UNetaDConditionModel(**a__ ) return model @property def a (self : Any ): """simple docstring""" return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def a (self : Tuple ): """simple docstring""" torch.manual_seed(0 ) __snake_case = VQModel(**self.dummy_movq_kwargs ) return model def a (self : Tuple ): """simple docstring""" __snake_case = self.dummy_unet __snake_case = self.dummy_movq __snake_case = { """num_train_timesteps""": 1000, """beta_schedule""": """linear""", """beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __snake_case = DDIMScheduler(**a__ ) __snake_case = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def a (self : Optional[int] , a__ : Union[str, Any] , a__ : 
Optional[int]=0 ): """simple docstring""" __snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(a__ ) ).to(a__ ) __snake_case = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( a__ ) # create init_image __snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ ) __snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0] __snake_case = Image.fromarray(np.uinta(a__ ) ).convert('''RGB''' ).resize((256, 256) ) # create hint __snake_case = floats_tensor((1, 3, 64, 64) , rng=random.Random(a__ ) ).to(a__ ) if str(a__ ).startswith('''mps''' ): __snake_case = torch.manual_seed(a__ ) else: __snake_case = torch.Generator(device=a__ ).manual_seed(a__ ) __snake_case = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def a (self : List[Any] ): """simple docstring""" __snake_case = """cpu""" __snake_case = self.get_dummy_components() __snake_case = self.pipeline_class(**a__ ) __snake_case = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) __snake_case = pipe(**self.get_dummy_inputs(a__ ) ) __snake_case = output.images __snake_case = pipe( **self.get_dummy_inputs(a__ ) , return_dict=a__ , )[0] __snake_case = image[0, -3:, -3:, -1] __snake_case = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __snake_case = np.array( [0.5_4_9_8_5_0_3_4, 0.5_5_5_0_9_3_6_5, 0.5_2_5_6_1_5_0_4, 0.5_5_7_0_4_9_4, 0.5_5_9_3_8_1_8, 0.5_2_6_3_9_7_9, 0.5_0_2_8_5_6_4_3, 0.5_0_6_9_8_4_6, 0.5_1_1_9_6_7_3_6] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}""" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}""" @slow @require_torch_gpu class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def a (self : List[str] ): """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def a (self : Optional[Any] ): """simple docstring""" __snake_case = load_numpy( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' ) __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' ) __snake_case = init_image.resize((512, 512) ) __snake_case = load_image( '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinskyv22/hint_image_cat.png''' ) __snake_case = torch.from_numpy(np.array(a__ ) ).float() / 255.0 __snake_case = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __snake_case = """A robot, 4k photo""" __snake_case = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.floataa ) pipe_prior.to(a__ ) __snake_case = KandinskyVaaControlnetImgaImgPipeline.from_pretrained( '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.floataa ) __snake_case = pipeline.to(a__ ) pipeline.set_progress_bar_config(disable=a__ ) __snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 ) __snake_case = pipe_prior( a__ , image=a__ , strength=0.8_5 , generator=a__ , 
negative_prompt='''''' , ).to_tuple() __snake_case = pipeline( image=a__ , image_embeds=a__ , negative_image_embeds=a__ , hint=a__ , generator=a__ , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , ) __snake_case = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(a__ , a__ )
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder UpperCAmelCase = """base_with_context""" def lowercase ( a__ : str , a__ : int ) -> Union[str, Any]: _UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) _UpperCamelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowercase ) for lyr_num, lyr in enumerate(model.encoders ): _UpperCamelCase = weights[F'''layers_{lyr_num}'''] _UpperCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) _UpperCamelCase = ly_weight["""attention"""] _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def lowercase ( a__ : str , a__ : Dict ) -> Any: _UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowercase ) for lyr_num, lyr in enumerate(model.encoders ): _UpperCamelCase = weights[F'''layers_{lyr_num}'''] _UpperCamelCase = ly_weight["""attention"""] _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def lowercase ( a__ : Dict , a__ : Tuple ) -> List[str]: _UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) _UpperCamelCase = 
nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=_lowercase ) _UpperCamelCase = nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): _UpperCamelCase = weights[F'''layers_{lyr_num}'''] _UpperCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) _UpperCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) _UpperCamelCase = ly_weight["""self_attention"""] _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _UpperCamelCase = ly_weight["""MultiHeadDotProductAttention_0"""] _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) _UpperCamelCase = nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) _UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def lowercase ( a__ : Tuple ) -> List[str]: _UpperCamelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path ) _UpperCamelCase = jnp.tree_util.tree_map(onp.array , _lowercase ) _UpperCamelCase = [ """from __gin__ import dynamic_registration""", """from music_spectrogram_diffusion.models.diffusion import diffusion_utils""", """diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0""", """diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()""", ] _UpperCamelCase = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' ) _UpperCamelCase = inference.parse_training_gin_file(_lowercase , _lowercase ) _UpperCamelCase = inference.InferenceModel(args.checkpoint_path , _lowercase ) _UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' ) _UpperCamelCase = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , 
num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) _UpperCamelCase = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) _UpperCamelCase = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) _UpperCamelCase = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , _lowercase ) _UpperCamelCase = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , _lowercase ) _UpperCamelCase = load_decoder(ta_checkpoint['''target''']['''decoder'''] , _lowercase ) _UpperCamelCase = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) _UpperCamelCase = SpectrogramDiffusionPipeline( notes_encoder=_lowercase , continuous_encoder=_lowercase , decoder=_lowercase , scheduler=_lowercase , melgan=_lowercase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": UpperCAmelCase = argparse.ArgumentParser() parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""") parser.add_argument( """--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not.""" ) parser.add_argument( """--checkpoint_path""", default=F'''{MODEL}/checkpoint_500000''', type=str, required=False, help="""Path to the original jax model checkpoint.""", ) UpperCAmelCase = parser.parse_args() main(args)
'''simple docstring'''
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
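# A hedged usage sketch for the decorator exercised above: wrap a real training loop so that an
# out-of-memory error at batch size 128 automatically retries at 64, 32, ... The body here is a
# placeholder; the dataloader/model wiring is an illustrative assumption.
from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # Build the dataloader with `batch_size` here; if CUDA raises an out-of-memory error,
    # the decorator releases memory, halves the batch size, and calls this function again.
    print(f"trying batch_size={batch_size}")


train()  # called without the batch_size argument -- the decorator injects it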
from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: slide its witness factor to the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` really is prime: its square is the first composite it witnesses.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the remainder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
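# A quick sanity check of the incremental sieve above (a sketch; itertools.islice simply takes the
# first few yielded primes).
from itertools import islice

print(list(islice(sieve(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]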
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from scipy.stats import spearmanr

import datasets


_DESCRIPTION = """
The Spearman rank-order correlation coefficient is a measure of the relationship between two datasets.
Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation.
Positive correlations imply that as data in dataset x increases, so does data in dataset y. Negative
correlations imply that as x increases, y decreases. Correlations of -1 or +1 imply an exact monotonic
relationship.

Unlike the Pearson correlation, the Spearman correlation does not assume that both datasets are normally
distributed.

The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a
Spearman correlation at least as extreme as the one computed from these datasets. The p-values are not
entirely reliable but are probably reasonable for datasets larger than 500 or so.
"""

_KWARGS_DESCRIPTION = """
Args:
    predictions (`List[float]`): Predicted labels, as returned by a model.
    references (`List[float]`): Ground truth labels.
    return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns
        only the spearmanr score. Defaults to `False`.
Returns:
    spearmanr (`float`): Spearman correlation coefficient.
    p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.
Examples:
    Example 1:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])
        >>> print(results)
        {'spearmanr': -0.7}

    Example 2:
        >>> spearmanr_metric = datasets.load_metric("spearmanr")
        >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],
        ...                                    predictions=[10, 9, 2.5, 6, 4],
        ...                                    return_pvalue=True)
        >>> print(results['spearmanr'])
        -0.7
        >>> print(round(results['spearmanr_pvalue'], 2))
        0.19
"""

_CITATION = r"""\
@book{kokoska2000crc,
   title={CRC standard probability and statistics tables and formulae},
   author={Kokoska, Stephen and Zwillinger, Daniel},
   year={2000},
   publisher={Crc Press}
}
@article{2020SciPy-NMeth,
  author  = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
             Haberland, Matt and Reddy, Tyler and Cournapeau, David and
             Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
             Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
             Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
             Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
             Kern, Robert and Larson, Eric and Carey, C J and
             Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and
             {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
             Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
             Harris, Charles R. and Archibald, Anne M. and
             Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and
             {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
  title   = {{{SciPy} 1.0: Fundamental Algorithms for Scientific Computing in Python}},
  journal = {Nature Methods},
  year    = {2020},
  volume  = {17},
  pages   = {261--272},
  adsurl  = {https://rdcu.be/b08Wh},
  doi     = {10.1038/s41592-019-0686-2},
}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Spearmanr(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("float"),
                    "references": datasets.Value("float"),
                }
            ),
            reference_urls=["https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"],
        )

    def _compute(self, predictions, references, return_pvalue=False):
        results = spearmanr(references, predictions)
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
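# A small cross-check of the metric against scipy directly, using the same illustrative values as the
# docstring examples above.
from scipy.stats import spearmanr as scipy_spearmanr

preds = [10, 9, 2.5, 6, 4]
refs = [1, 2, 3, 4, 5]
rho, p = scipy_spearmanr(refs, preds)
print(round(rho, 2))  # -0.7, matching the docstring example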
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
265
0
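# A minimal usage sketch for the fast Longformer tokenizer exercised in the test
# file above. The checkpoint name is taken from the test itself; downloading it
# needs network access, and the printed ids/offsets are illustrative, not asserted.
from transformers import LongformerTokenizerFast

tokenizer = LongformerTokenizerFast.from_pretrained("allenai/longformer-base-4096")
enc = tokenizer("hello hello", return_offsets_mapping=True, add_special_tokens=False)
print(enc.input_ids)       # byte-level BPE ids for the two words
print(enc.offset_mapping)  # character spans per token, e.g. [(0, 5), (6, 11)] with default trim_offsets=True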
'''simple docstring'''


def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
234
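# A small companion to the or_gate file above: enumerate the full truth table
# with itertools.product instead of hand-written asserts. The gate is redefined
# here (with the corrected parameter names) so the snippet is self-contained.
from itertools import product


def or_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(1) != 0)


for a, b in product((0, 1), repeat=2):
    print(f"or_gate({a}, {b}) = {or_gate(a, b)}")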
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
265
0
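# Hedged usage sketch for the zero-shot classification pipeline the test above
# exercises. "facebook/bart-large-mnli" is a common NLI checkpoint picked here
# for illustration; the test itself runs tiny and roberta-large-mnli models.
from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "Who are you voting for in 2020?",
    candidate_labels=["politics", "public health", "science"],
)
print(result["labels"], result["scores"])  # labels sorted by descending score

# multi_label=True scores each label independently instead of softmaxing over all of them
print(classifier("I love hiking and coding.", ["outdoors", "programming"], multi_label=True))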
"""simple docstring""" import os import unittest from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer from transformers.testing_utils import get_tests_dir from ...test_tokenization_common import TokenizerTesterMixin A_ : Optional[Any] = get_tests_dir("fixtures/test_sentencepiece_bpe.model") class lowerCamelCase (A__ ,unittest.TestCase ): lowerCamelCase__ : List[str] = BartphoTokenizer lowerCamelCase__ : Optional[int] = False lowerCamelCase__ : Optional[Any] = True def SCREAMING_SNAKE_CASE ( self : Dict ) -> Any: super().setUp() SCREAMING_SNAKE_CASE__ = ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] SCREAMING_SNAKE_CASE__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) ) SCREAMING_SNAKE_CASE__ = {"""unk_token""": """<unk>"""} SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""monolingual_vocab_file"""] ) with open(self.monolingual_vocab_file , """w""" , encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(F"""{token} {vocab_tokens[token]}\n""" ) SCREAMING_SNAKE_CASE__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self : List[Any] , **__UpperCAmelCase : List[str] ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return BartphoTokenizer.from_pretrained(self.tmpdirname , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : Dict ) -> List[Any]: SCREAMING_SNAKE_CASE__ = """This is a là test""" SCREAMING_SNAKE_CASE__ = """This is a<unk><unk> test""" return input_text, output_text def SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]: SCREAMING_SNAKE_CASE__ = BartphoTokenizer(__UpperCAmelCase , self.monolingual_vocab_file , **self.special_tokens_map ) SCREAMING_SNAKE_CASE__ = """This is a là test""" SCREAMING_SNAKE_CASE__ = """▁This ▁is ▁a ▁l à ▁t est""".split() SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token] SCREAMING_SNAKE_CASE__ = [4, 5, 6, 3, 3, 7, 8, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCAmelCase ) , __UpperCAmelCase )
165
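# Hedged sketch of using the BartPho tokenizer outside the test harness above.
# "vinai/bartpho-syllable" is, to the best of my knowledge, the public checkpoint
# for this model family; the test builds its own tiny SentencePiece vocab instead.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("vinai/bartpho-syllable")
ids = tokenizer("Chúng tôi là những nghiên cứu viên.").input_ids
print(tokenizer.convert_ids_to_tokens(ids))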
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a : List[Any] = """__DUMMY_TRANSFORMERS_USER__""" a : Tuple = """Dummy User""" a : Optional[Any] = """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""" a : Optional[Any] = """https://hub-ci.huggingface.co""" a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" a : str = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Optional[int]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: HfFolder.save_token(_lowercase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> str: return HfApi(endpoint=_lowercase ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : str = HfFolder.get_token() HfFolder.save_token(_lowercase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: def _cleanup_repo(_lowercase ): hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: @contextmanager def _temporary_repo(_lowercase ): try: yield repo_id finally: cleanup_repo(_lowercase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and 
token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_zipped_img_data_
265
0
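# A self-contained sketch of the two fixture patterns used in the conftest
# above: monkeypatching module-level config for one test, and a session-scoped
# fixture that restores state after its yield. All names here are hypothetical.
import pytest

CONFIG = {"endpoint": "https://example.invalid"}  # stand-in for datasets.config


@pytest.fixture
def ci_endpoint(monkeypatch):
    monkeypatch.setitem(CONFIG, "endpoint", "https://hub-ci.example.invalid")


@pytest.fixture(scope="session")
def token_file(tmp_path_factory):
    path = tmp_path_factory.mktemp("auth") / "token"
    path.write_text("dummy-token")
    yield path      # tests run here
    path.unlink()   # teardown once the session ends


def test_endpoint_is_patched(ci_endpoint):
    assert CONFIG["endpoint"].startswith("https://hub-ci")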
import argparse
import importlib
from pathlib import Path

# Test all the extensions added in the setup
FILES_TO_FIND = [
    "kernels/rwkv/wkv_cuda.cu",
    "kernels/rwkv/wkv_op.cpp",
    "kernels/deformable_detr/ms_deform_attn.h",
    "kernels/deformable_detr/cuda/ms_deform_im2col_cuda.cuh",
    "models/graphormer/algos_graphormer.pyx",
]


def test_custom_files_are_present(transformers_path):
    '''simple docstring'''
    # Test all the extensions added in the setup
    for file in FILES_TO_FIND:
        if not (transformers_path / file).exists():
            return False
    return True


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_lib", action="store_true", help="Whether to check the build or the actual package.")
    args = parser.parse_args()
    if args.check_lib:
        transformers_module = importlib.import_module("transformers")
        transformers_path = Path(transformers_module.__file__).parent
    else:
        transformers_path = Path.cwd() / "build/lib/transformers"

    if not test_custom_files_are_present(transformers_path):
        raise ValueError("The built release does not contain the custom files. Fix this before going further!")
30
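# The build-check script above boils down to this pattern: locate an installed
# package on disk via importlib and verify that expected data files shipped
# with it. Standard library only; the json example files exist on CPython.
import importlib
from pathlib import Path


def package_contains(package_name, relative_paths):
    module = importlib.import_module(package_name)
    root = Path(module.__file__).parent
    return all((root / p).exists() for p in relative_paths)


print(package_contains("json", ["decoder.py", "encoder.py"]))  # True on CPython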
'''simple docstring'''
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
265
0
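# Hedged usage sketch for the zero-shot image-classification pipeline defined
# above. "openai/clip-vit-base-patch32" is a standard CLIP checkpoint chosen
# for illustration; the URL is the usual two-cats COCO test image.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
preds = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
print(preds)  # list of {"score": float, "label": str}, sorted by descending score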
"""simple docstring""" from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP __SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) # pylint: disable=invalid-name __SCREAMING_SNAKE_CASE : Optional[Any] = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\") >>> pipe.to(\"cuda\") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save(\"cat.png\") ``` """ def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=8 ) -> List[str]: snake_case_ = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 snake_case_ = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class __A (snake_case__): '''simple docstring''' def __init__( self : List[str] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] , ) ->List[str]: """simple docstring""" super().__init__() self.register_modules( text_encoder=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , movq=UpperCAmelCase_ , ) snake_case_ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] ) ->List[Any]: """simple docstring""" if latents is None: snake_case_ = randn_tensor(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) snake_case_ = latents.to(UpperCAmelCase_ ) snake_case_ = latents * scheduler.init_noise_sigma return latents def lowerCAmelCase ( self : Optional[Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=None , ) ->Union[str, Any]: """simple docstring""" snake_case_ = len(UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) else 1 # get prompt text embeddings snake_case_ = self.tokenizer( UpperCAmelCase_ , padding="""max_length""" , truncation=UpperCAmelCase_ , max_length=77 , return_attention_mask=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors="""pt""" , ) snake_case_ = text_inputs.input_ids snake_case_ = self.tokenizer(UpperCAmelCase_ , padding="""longest""" , return_tensors="""pt""" ).input_ids if untruncated_ids.shape[-1] >= 
text_input_ids.shape[-1] and not torch.equal(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" F""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) snake_case_ = text_input_ids.to(UpperCAmelCase_ ) snake_case_ = text_inputs.attention_mask.to(UpperCAmelCase_ ) snake_case_ = self.text_encoder( input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) snake_case_ = prompt_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 ) snake_case_ = text_encoder_hidden_states.repeat_interleave(UpperCAmelCase_ , dim=0 ) snake_case_ = text_mask.repeat_interleave(UpperCAmelCase_ , dim=0 ) if do_classifier_free_guidance: snake_case_ = 42 if negative_prompt is None: snake_case_ = [""""""] * batch_size elif type(UpperCAmelCase_ ) is not type(UpperCAmelCase_ ): raise TypeError( F"""`negative_prompt` should be the same type to `prompt`, but got {type(UpperCAmelCase_ )} !=""" F""" {type(UpperCAmelCase_ )}.""" ) elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = [negative_prompt] elif batch_size != len(UpperCAmelCase_ ): raise ValueError( F"""`negative_prompt`: {negative_prompt} has batch size {len(UpperCAmelCase_ )}, but `prompt`:""" F""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" """ the batch size of `prompt`.""" ) else: snake_case_ = negative_prompt snake_case_ = self.tokenizer( UpperCAmelCase_ , padding="""max_length""" , max_length=77 , truncation=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , return_tensors="""pt""" , ) snake_case_ = uncond_input.input_ids.to(UpperCAmelCase_ ) snake_case_ = uncond_input.attention_mask.to(UpperCAmelCase_ ) snake_case_ = self.text_encoder( input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method snake_case_ = negative_prompt_embeds.shape[1] snake_case_ = negative_prompt_embeds.repeat(1 , UpperCAmelCase_ ) snake_case_ = negative_prompt_embeds.view(batch_size * num_images_per_prompt , UpperCAmelCase_ ) snake_case_ = uncond_text_encoder_hidden_states.shape[1] snake_case_ = uncond_text_encoder_hidden_states.repeat(1 , UpperCAmelCase_ , 1 ) snake_case_ = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , UpperCAmelCase_ , -1 ) snake_case_ = uncond_text_mask.repeat_interleave(UpperCAmelCase_ , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes snake_case_ = torch.cat([negative_prompt_embeds, prompt_embeds] ) snake_case_ = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) snake_case_ = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def lowerCAmelCase ( self : Dict , UpperCAmelCase_ : List[Any]=0 ) ->Tuple: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) snake_case_ = torch.device(F"""cuda:{gpu_id}""" ) snake_case_ = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCAmelCase_ , UpperCAmelCase_ ) def lowerCAmelCase ( self : int , UpperCAmelCase_ : Dict=0 ) ->Union[str, Any]: """simple docstring""" if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) snake_case_ = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=UpperCAmelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) snake_case_ = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: snake_case_ = cpu_offload_with_hook(UpperCAmelCase_ , UpperCAmelCase_ , prev_module_hook=UpperCAmelCase_ ) if self.safety_checker is not None: snake_case_ = cpu_offload_with_hook(self.safety_checker , UpperCAmelCase_ , prev_module_hook=UpperCAmelCase_ ) # We'll offload the last model manually. 
snake_case_ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]: """simple docstring""" if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCAmelCase_ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCAmelCase_ ) def __call__( self : List[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = 512 , UpperCAmelCase_ : Tuple = 512 , UpperCAmelCase_ : Tuple = 100 , UpperCAmelCase_ : List[str] = 4.0 , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : Optional[Any] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = "pil" , UpperCAmelCase_ : str = True , ) ->Dict: """simple docstring""" if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = 1 elif isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = len(UpperCAmelCase_ ) else: raise ValueError(F"""`prompt` has to be of type `str` or `list` but is {type(UpperCAmelCase_ )}""" ) snake_case_ = self._execution_device snake_case_ = batch_size * num_images_per_prompt snake_case_ = guidance_scale > 1.0 snake_case_ = self._encode_prompt( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = torch.cat(UpperCAmelCase_ , dim=0 ) if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ): snake_case_ = torch.cat(UpperCAmelCase_ , dim=0 ) if do_classifier_free_guidance: snake_case_ = image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 ) snake_case_ = negative_image_embeds.repeat_interleave(UpperCAmelCase_ , dim=0 ) snake_case_ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=UpperCAmelCase_ ) self.scheduler.set_timesteps(UpperCAmelCase_ , device=UpperCAmelCase_ ) snake_case_ = self.scheduler.timesteps snake_case_ = self.unet.config.in_channels snake_case_ = get_new_h_w(UpperCAmelCase_ , UpperCAmelCase_ , self.movq_scale_factor ) # create initial latent snake_case_ = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCAmelCase_ ) ): # expand the latents if we are doing classifier free guidance snake_case_ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents snake_case_ = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds} snake_case_ = self.unet( sample=UpperCAmelCase_ , timestep=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , added_cond_kwargs=UpperCAmelCase_ , return_dict=UpperCAmelCase_ , )[0] if do_classifier_free_guidance: snake_case_ = noise_pred.split(latents.shape[1] , dim=1 ) snake_case_ = noise_pred.chunk(2 ) snake_case_ = variance_pred.chunk(2 ) snake_case_ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) snake_case_ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): snake_case_ = 
noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 snake_case_ = self.scheduler.step( UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , generator=UpperCAmelCase_ , ).prev_sample # post-processing snake_case_ = self.movq.decode(UpperCAmelCase_ , force_not_quantize=UpperCAmelCase_ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: snake_case_ = image * 0.5 + 0.5 snake_case_ = image.clamp(0 , 1 ) snake_case_ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": snake_case_ = self.numpy_to_pil(UpperCAmelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCAmelCase_ )
347
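# The denoising loop in the pipeline above applies classifier-free guidance.
# A tiny numpy sketch of just that arithmetic, with random tensors standing in
# for the UNet's stacked [unconditional, text-conditioned] noise predictions:
import numpy as np

guidance_scale = 4.0
noise_pred = np.random.randn(2, 4, 8, 8)  # batch of 2: row 0 uncond, row 1 text-cond
noise_pred_uncond, noise_pred_text = np.split(noise_pred, 2, axis=0)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(guided.shape)  # (1, 4, 8, 8): the prediction handed to scheduler.step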
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __lowerCamelCase ( _lowercase ) -> Optional[Any]: return getitem, k def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]: return setitem, k, v def __lowerCamelCase ( _lowercase ) -> int: return delitem, k def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]: try: return fun(_lowercase , *_lowercase ), None except Exception as e: return None, e a : List[str] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) a : List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] a : int = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] a : List[Any] = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] a : Tuple = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] a : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : List[str] = HashMap(initial_block_size=4 ) UpperCAmelCase : Dict = {} for _, (fun, *args) in enumerate(_lowercase ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase ) UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase ) assert my_res == py_res assert str(_lowercase ) == str(_lowercase ) assert set(_lowercase ) == set(_lowercase ) assert len(_lowercase ) == len(_lowercase ) assert set(my.items() ) == set(py.items() ) def __lowerCamelCase ( ) -> List[Any]: def is_public(_lowercase ) -> bool: return not name.startswith("""_""" ) UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )} UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )} assert dict_public_names > hash_public_names
265
0
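# The test above drives a custom HashMap and a plain dict with identical
# operations and asserts they stay in lockstep. A minimal self-contained
# version of that differential-testing idea, comparing exceptions by type:
from operator import delitem, getitem, setitem


def run(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, type(e)


ops = [(setitem, "k", 1), (getitem, "k"), (delitem, "k"), (getitem, "k")]
impl_under_test, reference = {}, {}  # swap the first for any mapping to test
for fun, *args in ops:
    assert run(impl_under_test, fun, *args) == run(reference, fun, *args)
print("implementations agree on", len(ops), "operations")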
from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ : '''simple docstring''' def __init__( self :Union[str, Any] , a :List[Any] , a :List[str]=1_2 , a :List[Any]=7 , a :List[str]=True , a :str=True , a :List[str]=True , a :int=9_9 , a :int=3_2 , a :str=3_2 , a :Optional[int]=2 , a :Union[str, Any]=4 , a :str=3_7 , a :List[str]=0.1 , a :Any=0.1 , a :List[Any]=5_1_2 , a :List[str]=0.02 , a :int=0 , a :Optional[int]=None , ) -> Any: __UpperCamelCase : Optional[Any] = parent __UpperCamelCase : str = batch_size __UpperCamelCase : Union[str, Any] = seq_length __UpperCamelCase : Optional[Any] = is_training __UpperCamelCase : int = use_input_mask __UpperCamelCase : List[Any] = use_labels __UpperCamelCase : Dict = vocab_size __UpperCamelCase : str = hidden_size __UpperCamelCase : List[Any] = projection_dim __UpperCamelCase : Tuple = num_hidden_layers __UpperCamelCase : Dict = num_attention_heads __UpperCamelCase : Optional[Any] = intermediate_size __UpperCamelCase : Any = dropout __UpperCamelCase : List[Any] = attention_dropout __UpperCamelCase : Optional[Any] = max_position_embeddings __UpperCamelCase : Tuple = initializer_range __UpperCamelCase : Optional[Any] = scope __UpperCamelCase : Union[str, Any] = bos_token_id def _lowerCamelCase ( self :Union[str, Any] ) -> Tuple: __UpperCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCamelCase : Union[str, Any] = None if self.use_input_mask: __UpperCamelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: __UpperCamelCase : Tuple = input_mask.numpy() __UpperCamelCase : int = input_mask.shape __UpperCamelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(a ): __UpperCamelCase : Tuple = 1 __UpperCamelCase : Optional[Any] = 0 __UpperCamelCase : int = self.get_config() return config, input_ids, tf.convert_to_tensor(a ) def _lowerCamelCase ( self :Optional[Any] ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowerCamelCase ( self :int , a :Tuple , a :Optional[Any] , a :Union[str, Any] ) -> Union[str, Any]: __UpperCamelCase : int = TFBlipTextModel(config=a ) __UpperCamelCase : Union[str, Any] = model(a , attention_mask=a , training=a ) __UpperCamelCase : int = model(a , training=a ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowerCamelCase ( self :Union[str, Any] ) -> Optional[int]: __UpperCamelCase : Dict = 
self.prepare_config_and_inputs() __UpperCamelCase : Any = config_and_inputs __UpperCamelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( __lowercase , unittest.TestCase): '''simple docstring''' _A = (TFBlipTextModel,) if is_tf_available() else () _A = False _A = False _A = False def _lowerCamelCase ( self :str ) -> int: __UpperCamelCase : Union[str, Any] = BlipTextModelTester(self ) __UpperCamelCase : List[str] = ConfigTester(self , config_class=a , hidden_size=3_7 ) def _lowerCamelCase ( self :List[str] ) -> Tuple: self.config_tester.run_common_tests() def _lowerCamelCase ( self :List[Any] ) -> List[Any]: __UpperCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a ) def _lowerCamelCase ( self :Any ) -> List[str]: pass def _lowerCamelCase ( self :List[Any] ) -> Optional[int]: pass @unittest.skip(reason="Blip does not use inputs_embeds" ) def _lowerCamelCase ( self :Optional[Any] ) -> Union[str, Any]: pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" ) def _lowerCamelCase ( self :List[Any] ) -> Optional[int]: pass @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" ) def _lowerCamelCase ( self :List[Any] ) -> Dict: pass @slow def _lowerCamelCase ( self :Any ) -> Dict: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCamelCase : Any = TFBlipTextModel.from_pretrained(a ) self.assertIsNotNone(a ) def _lowerCamelCase ( self :Any , a :Tuple=True ) -> str: super().test_pt_tf_model_equivalence(allow_missing_keys=a )
232
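# The model tester above fabricates padding masks by drawing a random valid
# length per sequence. The same idea in plain numpy, without the TF machinery:
import numpy as np

batch_size, seq_length = 3, 8
rng = np.random.default_rng(0)
lengths = rng.integers(1, seq_length, size=batch_size)  # at least one real token per row
attention_mask = (np.arange(seq_length)[None, :] < lengths[:, None]).astype(np.int32)
print(attention_mask)  # 1s over real tokens, trailing 0s over padding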
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)


@require_torch
@require_sentencepiece
@require_tokenizers
class MBartEnroIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-en-ro"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2, EN_CODE]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartTokenizer = MBartTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_XX"], 250004)  # was "en_EN", not a valid MBart code
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-2], 2)
        self.assertEqual(ids[-1], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250026, 250001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE]
        assert batch.decoder_input_ids[1][0].tolist() == RO_CODE
        assert batch.decoder_input_ids[1][-1] == 2
        assert batch.labels[1][-2:].tolist() == [2, RO_CODE]

    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, -1])  # EOS
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id, EN_CODE])

    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )
        self.assertEqual(
            nested_simplify(inputs),
            {
                # A, test, EOS, en_XX
                "input_ids": [[62, 3034, 2, 250004]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
265
0
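The MBart batch tests in the sample above hinge on shift_tokens_right, which rotates the target-language code from the end of the labels to the front of the decoder inputs. Below is a minimal sketch of that wrap-around, assuming plain PyTorch label tensors and ignoring the library's extra handling of -100 masked positions; shift_right_sketch is a hypothetical helper, not the transformers API.

import torch


def shift_right_sketch(labels: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    # Move each row's last non-pad token (the language code) to position 0,
    # shifting everything else one step to the right -- illustrative only.
    shifted = labels.clone()
    # index of the last non-pad token in every row
    last_nonpad = (labels != pad_token_id).sum(dim=1, keepdim=True) - 1
    lang_codes = labels.gather(1, last_nonpad).squeeze(1)
    shifted[:, 1:] = labels[:, :-1]
    shifted[:, 0] = lang_codes
    return shifted

With labels ending in [..., 2, RO_CODE], the result starts with RO_CODE and ends with 2, which is what test_batch_fairseq_parity asserts.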
import unittest import numpy as np from transformers import is_flax_available from transformers.testing_utils import require_flax from ..test_modeling_flax_common import ids_tensor if is_flax_available(): import jax import jax.numpy as jnp from transformers.generation import ( FlaxForcedBOSTokenLogitsProcessor, FlaxForcedEOSTokenLogitsProcessor, FlaxLogitsProcessorList, FlaxMinLengthLogitsProcessor, FlaxTemperatureLogitsWarper, FlaxTopKLogitsWarper, FlaxTopPLogitsWarper, ) @require_flax class _lowercase ( unittest.TestCase ): """simple docstring""" def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ ): """simple docstring""" a = jnp.ones((batch_size, length) ) / length return scores def UpperCamelCase_ (self ): """simple docstring""" a = None a = 20 a = self._get_uniform_logits(batch_size=2 , length=lowerCamelCase_ ) # tweak scores to not be uniform anymore a = scores.at[1, 5].set((1 / length) + 0.1 ) # peak, 1st batch a = scores.at[1, 10].set((1 / length) - 0.4 ) # valley, 1st batch # compute softmax a = jax.nn.softmax(lowerCamelCase_ , axis=-1 ) a = FlaxTemperatureLogitsWarper(temperature=0.5 ) a = FlaxTemperatureLogitsWarper(temperature=1.3 ) a = jax.nn.softmax(temp_dist_warper_sharper(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 ) a = jax.nn.softmax(temp_dist_warper_smoother(lowerCamelCase_ , scores.copy() , cur_len=lowerCamelCase_ ) , axis=-1 ) # uniform distribution stays uniform self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_sharp[0, :] , atol=1E-3 ) ) self.assertTrue(jnp.allclose(probs[0, :] , warped_prob_smooth[0, :] , atol=1E-3 ) ) # sharp peaks get higher, valleys get lower self.assertLess(probs[1, :].max() , warped_prob_sharp[1, :].max() ) self.assertGreater(probs[1, :].min() , warped_prob_sharp[1, :].min() ) # smooth peaks get lower, valleys get higher self.assertGreater(probs[1, :].max() , warped_prob_smooth[1, :].max() ) self.assertLess(probs[1, :].min() , warped_prob_smooth[1, :].min() ) def UpperCamelCase_ (self ): """simple docstring""" a = None a = 10 a = 2 # create ramp distribution a = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() a = ramp_logits[1:, : vocab_size // 2] + vocab_size a = FlaxTopKLogitsWarper(3 ) a = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # check that correct tokens are filtered self.assertListEqual(jnp.isinf(scores[0] ).tolist() , 7 * [True] + 3 * [False] ) self.assertListEqual(jnp.isinf(scores[1] ).tolist() , 2 * [True] + 3 * [False] + 5 * [True] ) # check special case a = 5 a = FlaxTopKLogitsWarper(top_k=1 , filter_value=0.0 , min_tokens_to_keep=3 ) a = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, length) ).copy() a = top_k_warp_safety_check(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified self.assertListEqual((scores == 0.0).sum(axis=-1 ).tolist() , [2, 2] ) def UpperCamelCase_ (self ): """simple docstring""" a = None a = 10 a = 2 # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper) a = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]] ) ) a = FlaxTopPLogitsWarper(0.8 ) a = np.exp(top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) ) # dist should be filtered to keep min num values so that sum is >= top_p # exp (-inf) => 0 a = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]] ) self.assertTrue(np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) ) # 
check edge cases with negative and extreme logits a = np.broadcast_to(np.arange(lowerCamelCase_ )[None, :] , (batch_size, vocab_size) ).copy() - ( vocab_size // 2 ) # make ramp_logits more extreme a = ramp_logits[1] * 100.0 # make sure at least 2 tokens are kept a = FlaxTopPLogitsWarper(0.9 , min_tokens_to_keep=2 , filter_value=0.0 ) a = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2. self.assertListEqual((filtered_dist != 0.0).sum(axis=-1 ).tolist() , [3, 2] ) def UpperCamelCase_ (self ): """simple docstring""" a = 20 a = 4 a = 0 a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) # check that min length is applied at length 5 a = ids_tensor((batch_size, 20) , vocab_size=20 ) a = 5 a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) a = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist() , 4 * [-float("inf" )] ) # check that min length is not applied anymore at length 15 a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) a = 15 a = min_dist_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def UpperCamelCase_ (self ): """simple docstring""" a = 20 a = 4 a = 0 a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) # check that all scores are -inf except the bos_token_id score a = ids_tensor((batch_size, 1) , vocab_size=20 ) a = 1 a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) a = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, bos_token_id].tolist() , 4 * [0] ) # score for bos_token_id shold be zero # check that bos_token_id is not forced if current length is greater than 1 a = 3 a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) a = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def UpperCamelCase_ (self ): """simple docstring""" a = 20 a = 4 a = 0 a = 5 a = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) # check that all scores are -inf except the eos_token_id when max_length is reached a = ids_tensor((batch_size, 4) , vocab_size=20 ) a = 4 a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) a = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :] ).all() ) self.assertListEqual(scores[:, eos_token_id].tolist() , 4 * [0] ) # score for eos_token_id should be zero # check that eos_token_id is not forced if max_length is not reached a = 3 a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) a = logits_processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) self.assertFalse(jnp.isinf(lowerCamelCase_ ).any() ) def UpperCamelCase_ (self ): """simple docstring""" a = 4 a = 10 a = 15 a = 2 a = 1 a = 15 # dummy input_ids and scores a = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ ) a = input_ids.copy() a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) a = scores.copy() # instantiate all dist processors a = FlaxTemperatureLogitsWarper(temperature=0.5 ) a = FlaxTopKLogitsWarper(3 ) 
a = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) a = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) a = 10 # no processor list a = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # with processor list a = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) a = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() ) def UpperCamelCase_ (self ): """simple docstring""" a = 4 a = 10 a = 15 a = 2 a = 1 a = 15 # dummy input_ids and scores a = ids_tensor((batch_size, sequence_length) , lowerCamelCase_ ) a = input_ids.copy() a = self._get_uniform_logits(lowerCamelCase_ , lowerCamelCase_ ) a = scores.copy() # instantiate all dist processors a = FlaxTemperatureLogitsWarper(temperature=0.5 ) a = FlaxTopKLogitsWarper(3 ) a = FlaxTopPLogitsWarper(0.8 ) # instantiate all logits processors a = FlaxMinLengthLogitsProcessor(min_length=10 , eos_token_id=lowerCamelCase_ ) a = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=lowerCamelCase_ ) a = FlaxForcedEOSTokenLogitsProcessor(max_length=lowerCamelCase_ , eos_token_id=lowerCamelCase_ ) a = 10 # no processor list def run_no_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): a = temp_dist_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = top_k_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = top_p_warp(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = min_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = bos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) a = eos_dist_proc(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) return scores # with processor list def run_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ): a = FlaxLogitsProcessorList( [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc] ) a = processor(lowerCamelCase_ , lowerCamelCase_ , cur_len=lowerCamelCase_ ) return scores a = jax.jit(lowerCamelCase_ ) a = jax.jit(lowerCamelCase_ ) a = jitted_run_no_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) a = jitted_run_processor_list(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) # scores should be equal self.assertTrue(jnp.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1E-3 ) ) # input_ids should never be changed self.assertListEqual(input_ids.tolist() , input_ids_comp.tolist() )
227
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
265
0
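The FlaxTopPLogitsWarper assertions in the row above all follow one invariant: keep the smallest set of highest-probability tokens whose cumulative mass reaches top_p, and mask everything else. A NumPy sketch of that filtering rule, illustrative only; the real Flax warper sorts with lax.top_k and also enforces min_tokens_to_keep.

import numpy as np


def top_p_filter_sketch(logits: np.ndarray, top_p: float, filter_value: float = -np.inf) -> np.ndarray:
    # Sort descending, convert to probabilities, and keep every token whose
    # cumulative probability *before* it is still below top_p.
    sorted_idx = np.argsort(logits, axis=-1)[..., ::-1]
    sorted_logits = np.take_along_axis(logits, sorted_idx, axis=-1)
    probs = np.exp(sorted_logits - sorted_logits.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)
    cumulative = np.cumsum(probs, axis=-1)
    keep = (cumulative - probs) < top_p  # always keeps the top-1 token
    filtered = np.where(keep, sorted_logits, filter_value)
    out = np.empty_like(logits)
    np.put_along_axis(out, sorted_idx, filtered, axis=-1)
    return out

On the distribution from the test above, [0.3, 0.1, 0.1, 0.5] with top_p=0.8, this keeps exactly the 0.5 and 0.3 entries, matching the expected filtered dist.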
import numpy as np
import skfuzzy as fuzz

if __name__ == "__main__":
    # Create universe of discourse in Python using linspace()
    X = np.linspace(start=0, stop=75, num=75, endpoint=True, retstep=False)

    # Create two fuzzy sets by defining any membership function
    # (trapmf(), gbellmf(), gaussmf(), etc).
    abc1 = [0, 25, 50]
    abc2 = [25, 50, 75]
    young = fuzz.membership.trimf(X, abc1)
    middle_aged = fuzz.membership.trimf(X, abc2)

    # Compute the different operations using inbuilt functions.
    one = np.ones(75)
    zero = np.zeros((75,))
    # 1. Union = max(µA(x), µB(x))
    union = fuzz.fuzzy_or(X, young, X, middle_aged)[1]
    # 2. Intersection = min(µA(x), µB(x))
    intersection = fuzz.fuzzy_and(X, young, X, middle_aged)[1]
    # 3. Complement (A) = 1 - µA(x)
    complement_a = fuzz.fuzzy_not(young)
    # 4. Difference (A/B) = min(µA(x), 1 - µB(x))
    difference = fuzz.fuzzy_and(X, young, X, fuzz.fuzzy_not(middle_aged))[1]
    # 5. Algebraic Sum = µA(x) + µB(x) - (µA(x) * µB(x))
    alg_sum = young + middle_aged - (young * middle_aged)
    # 6. Algebraic Product = µA(x) * µB(x)
    alg_product = young * middle_aged
    # 7. Bounded Sum = min[1, µA(x) + µB(x)]
    bdd_sum = fuzz.fuzzy_and(X, one, X, young + middle_aged)[1]
    # 8. Bounded Difference = max[0, µA(x) - µB(x)]
    bdd_difference = fuzz.fuzzy_or(X, zero, X, young - middle_aged)[1]

    # max-min composition
    # max-product composition

    # Plot each set A, set B and each operation result using plot() and subplot().
    from matplotlib import pyplot as plt

    plt.figure()

    plt.subplot(4, 3, 1)
    plt.plot(X, young)
    plt.title("Young")
    plt.grid(True)

    plt.subplot(4, 3, 2)
    plt.plot(X, middle_aged)
    plt.title("Middle aged")
    plt.grid(True)

    plt.subplot(4, 3, 3)
    plt.plot(X, union)
    plt.title("union")
    plt.grid(True)

    plt.subplot(4, 3, 4)
    plt.plot(X, intersection)
    plt.title("intersection")
    plt.grid(True)

    plt.subplot(4, 3, 5)
    plt.plot(X, complement_a)
    plt.title("complement_a")
    plt.grid(True)

    plt.subplot(4, 3, 6)
    plt.plot(X, difference)
    plt.title("difference a/b")
    plt.grid(True)

    plt.subplot(4, 3, 7)
    plt.plot(X, alg_sum)
    plt.title("alg_sum")
    plt.grid(True)

    plt.subplot(4, 3, 8)
    plt.plot(X, alg_product)
    plt.title("alg_product")
    plt.grid(True)

    plt.subplot(4, 3, 9)
    plt.plot(X, bdd_sum)
    plt.title("bdd_sum")
    plt.grid(True)

    plt.subplot(4, 3, 10)
    plt.plot(X, bdd_difference)
    plt.title("bdd_difference")
    plt.grid(True)

    plt.subplots_adjust(hspace=0.5)
    plt.show()
178
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
265
0
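The fuzzy-set script in this row leans on skfuzzy, but the standard operations reduce to elementwise NumPy arithmetic, which gives a quick library-free cross-check. A short sketch under the same triangular memberships; the closed-form trimf expressions are my own restatement, not skfuzzy code.

import numpy as np

x = np.linspace(0, 75, 75)
# trimf [0, 25, 50] and [25, 50, 75], written out by hand
young = np.clip(np.minimum(x / 25, (50 - x) / 25), 0, 1)
middle_aged = np.clip(np.minimum((x - 25) / 25, (75 - x) / 25), 0, 1)

union = np.maximum(young, middle_aged)         # should match fuzz.fuzzy_or(...)[1]
intersection = np.minimum(young, middle_aged)  # should match fuzz.fuzzy_and(...)[1]
complement_a = 1.0 - young                     # should match fuzz.fuzzy_not(young)
bounded_sum = np.minimum(1.0, young + middle_aged)
bounded_difference = np.maximum(0.0, young - middle_aged)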
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging snake_case_ = logging.get_logger(__name__) if is_vision_available(): import PIL class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ): A_ : Tuple = ['pixel_values'] def __init__(self : List[Any] , a__ : Optional[int] = True , a__ : Any = None , a__ : Optional[Any] = PILImageResampling.BICUBIC , a__ : Dict = True , a__ : int = None , a__ : Optional[Any] = True , a__ : int = 1 / 255 , a__ : Union[str, Any] = True , a__ : List[str] = None , a__ : Union[str, Any] = None , a__ : Dict = True , **a__ : str , ): """simple docstring""" super().__init__(**a__ ) __snake_case = size if size is not None else {"""shortest_edge""": 224} __snake_case = get_size_dict(a__ , default_to_square=a__ ) __snake_case = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __snake_case = get_size_dict(a__ , default_to_square=a__ , param_name='''crop_size''' ) __snake_case = do_resize __snake_case = size __snake_case = resample __snake_case = do_center_crop __snake_case = crop_size __snake_case = do_rescale __snake_case = rescale_factor __snake_case = do_normalize __snake_case = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __snake_case = image_std if image_std is not None else OPENAI_CLIP_STD __snake_case = do_convert_rgb def a (self : List[Any] , a__ : Optional[int] , a__ : str , a__ : List[str] = PILImageResampling.BICUBIC , a__ : Dict = None , **a__ : List[Any] , ): """simple docstring""" __snake_case = get_size_dict(a__ , default_to_square=a__ ) if "shortest_edge" not in size: raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __snake_case = get_resize_output_image_size(a__ , size=size['''shortest_edge'''] , default_to_square=a__ ) return resize(a__ , size=a__ , resample=a__ , data_format=a__ , **a__ ) def a (self : Union[str, Any] , a__ : Optional[Any] , a__ : int , a__ : Dict = None , **a__ : Optional[Any] , ): """simple docstring""" __snake_case = get_size_dict(a__ ) if "height" not in size or "width" not in size: raise ValueError(f"""The `size` parameter must contain the keys (height, width). 
Got {size.keys()}""" ) return center_crop(a__ , size=(size['''height'''], size['''width''']) , data_format=a__ , **a__ ) def a (self : str , a__ : Any , a__ : Tuple , a__ : Dict = None , **a__ : Optional[Any] , ): """simple docstring""" return rescale(a__ , scale=a__ , data_format=a__ , **a__ ) def a (self : Optional[Any] , a__ : str , a__ : Union[str, Any] , a__ : Dict , a__ : Union[str, Any] = None , **a__ : Optional[int] , ): """simple docstring""" return normalize(a__ , mean=a__ , std=a__ , data_format=a__ , **a__ ) def a (self : Any , a__ : Optional[int] , a__ : Optional[int] = None , a__ : Union[str, Any] = None , a__ : int = None , a__ : List[str] = None , a__ : str = None , a__ : List[Any] = None , a__ : Any = None , a__ : Tuple = None , a__ : Dict = None , a__ : List[str] = None , a__ : Any = None , a__ : List[Any] = None , a__ : Tuple = ChannelDimension.FIRST , **a__ : List[str] , ): """simple docstring""" __snake_case = do_resize if do_resize is not None else self.do_resize __snake_case = size if size is not None else self.size __snake_case = get_size_dict(a__ , param_name='''size''' , default_to_square=a__ ) __snake_case = resample if resample is not None else self.resample __snake_case = do_center_crop if do_center_crop is not None else self.do_center_crop __snake_case = crop_size if crop_size is not None else self.crop_size __snake_case = get_size_dict(a__ , param_name='''crop_size''' , default_to_square=a__ ) __snake_case = do_rescale if do_rescale is not None else self.do_rescale __snake_case = rescale_factor if rescale_factor is not None else self.rescale_factor __snake_case = do_normalize if do_normalize is not None else self.do_normalize __snake_case = image_mean if image_mean is not None else self.image_mean __snake_case = image_std if image_std is not None else self.image_std __snake_case = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __snake_case = make_list_of_images(a__ ) if not valid_images(a__ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __snake_case = [convert_to_rgb(a__ ) for image in images] # All transformations expect numpy arrays. __snake_case = [to_numpy_array(a__ ) for image in images] if do_resize: __snake_case = [self.resize(image=a__ , size=a__ , resample=a__ ) for image in images] if do_center_crop: __snake_case = [self.center_crop(image=a__ , size=a__ ) for image in images] if do_rescale: __snake_case = [self.rescale(image=a__ , scale=a__ ) for image in images] if do_normalize: __snake_case = [self.normalize(image=a__ , mean=a__ , std=a__ ) for image in images] __snake_case = [to_channel_dimension_format(a__ , a__ ) for image in images] __snake_case = {"""pixel_values""": images} return BatchFeature(data=a__ , tensor_type=a__ )
24
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
265
0
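The style_context in this row runs the UNet once on a doubled batch and then blends the two halves; that blend is the whole of classifier-free guidance. A condensed sketch of just that step, mirroring the noise_pred.chunk(2) lines in the pipeline; cfg_combine_sketch is a hypothetical helper name, not a diffusers API.

import torch


def cfg_combine_sketch(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    # noise_pred stacks the unconditional and text-conditioned predictions
    # along the batch axis, as produced by the doubled-latents forward pass.
    noise_uncond, noise_text = noise_pred.chunk(2)
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)

Setting guidance_scale = 1.0 recovers the unguided prediction, which is why the pipeline only takes this branch when guidance_scale > 1.0.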
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase): snake_case__ = StableDiffusionInstructPixaPixPipeline snake_case__ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} snake_case__ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS snake_case__ = IMAGE_TO_IMAGE_IMAGE_PARAMS def _UpperCamelCase ( self : Optional[int] ) -> Any: torch.manual_seed(0 ) _UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) _UpperCamelCase = PNDMScheduler(skip_prk_steps=__UpperCamelCase ) torch.manual_seed(0 ) _UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) torch.manual_seed(0 ) _UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) _UpperCamelCase = CLIPTextModel(__UpperCamelCase ) _UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) _UpperCamelCase = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=0 ) -> str: _UpperCamelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) _UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 )[0] _UpperCamelCase = Image.fromarray(np.uinta(__UpperCamelCase ) ).convert('''RGB''' ) if str(__UpperCamelCase ).startswith('''mps''' ): _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) else: _UpperCamelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) _UpperCamelCase = { """prompt""": """A painting of a squirrel eating a burger""", """image""": image, """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """image_guidance_scale""": 1, """output_type""": """numpy""", } return inputs def _UpperCamelCase ( self : int ) -> str: _UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator 
_UpperCamelCase = self.get_dummy_components() _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCamelCase = sd_pipe(**__UpperCamelCase ).images _UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCamelCase = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self : str ) -> Tuple: _UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCamelCase = """french fries""" _UpperCamelCase = sd_pipe(**__UpperCamelCase , negative_prompt=__UpperCamelCase ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) _UpperCamelCase = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]: _UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCamelCase = [inputs["""prompt"""]] * 2 _UpperCamelCase = np.array(inputs['''image'''] ).astype(np.floataa ) / 255.0 _UpperCamelCase = torch.from_numpy(__UpperCamelCase ).unsqueeze(0 ).to(__UpperCamelCase ) _UpperCamelCase = image / 2 + 0.5 _UpperCamelCase = image.permute(0 , 3 , 1 , 2 ) _UpperCamelCase = image.repeat(2 , 1 , 1 , 1 ) _UpperCamelCase = sd_pipe(**__UpperCamelCase ).images _UpperCamelCase = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) _UpperCamelCase = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: _UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = EulerAncestralDiscreteScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' ) _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = self.get_dummy_inputs(__UpperCamelCase ) _UpperCamelCase = sd_pipe(**__UpperCamelCase ).images _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = [round(__UpperCamelCase , 4 ) for x in image_slice.flatten().tolist()] print(''','''.join([str(__UpperCamelCase ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) _UpperCamelCase = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 
0.5_6_7_3, 0.4_9_8_6] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self : List[Any] ) -> Optional[Any]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) def _UpperCamelCase ( self : str ) -> Tuple: _UpperCamelCase = self.get_dummy_components() _UpperCamelCase = StableDiffusionInstructPixaPixPipeline(**__UpperCamelCase ) _UpperCamelCase = VaeImageProcessor(do_resize=__UpperCamelCase , do_normalize=__UpperCamelCase ) _UpperCamelCase = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = pipe(**self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='''pt''' ) )[0] _UpperCamelCase = components["""vae"""] _UpperCamelCase = self.get_dummy_inputs_by_type(__UpperCamelCase , input_image_type='''pt''' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): _UpperCamelCase = vae.encode(inputs[image_param] ).latent_dist.mode() _UpperCamelCase = pipe(**__UpperCamelCase )[0] _UpperCamelCase = np.abs(out - out_latents_inputs ).max() self.assertLess(__UpperCamelCase , 1E-4 , '''passing latents as image input generate different result from passing image''' ) @slow @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): def _UpperCamelCase ( self : Any ) -> str: super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self : Any , __UpperCamelCase : int=0 ) -> Union[str, Any]: _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) _UpperCamelCase = load_image( '''https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg''' ) _UpperCamelCase = { """prompt""": """turn him into a cyborg""", """image""": image, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """image_guidance_scale""": 1.0, """output_type""": """numpy""", } return inputs def _UpperCamelCase ( self : Optional[int] ) -> Tuple: _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCamelCase ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCamelCase = self.get_inputs() _UpperCamelCase = pipe(**__UpperCamelCase ).images _UpperCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _UpperCamelCase = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _UpperCamelCase ( self : str ) -> List[Any]: _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCamelCase ) _UpperCamelCase = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCamelCase = self.get_inputs() _UpperCamelCase = pipe(**__UpperCamelCase ).images _UpperCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _UpperCamelCase = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _UpperCamelCase ( self : str ) -> Union[str, Any]: _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCamelCase ) _UpperCamelCase = 
DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCamelCase = self.get_inputs() _UpperCamelCase = pipe(**__UpperCamelCase ).images _UpperCamelCase = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) _UpperCamelCase = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _UpperCamelCase ( self : List[str] ) -> List[str]: _UpperCamelCase = 0 def callback_fn(__UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) -> None: _UpperCamelCase = True nonlocal number_of_steps number_of_steps += 1 if step == 1: _UpperCamelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) _UpperCamelCase = latents[0, -3:, -3:, -1] _UpperCamelCase = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 elif step == 2: _UpperCamelCase = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) _UpperCamelCase = latents[0, -3:, -3:, -1] _UpperCamelCase = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 _UpperCamelCase = False _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) _UpperCamelCase = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCamelCase = self.get_inputs() pipe(**__UpperCamelCase , callback=__UpperCamelCase , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _UpperCamelCase ( self : Optional[int] ) -> List[Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( '''timbrooks/instruct-pix2pix''' , safety_checker=__UpperCamelCase , torch_dtype=torch.floataa ) _UpperCamelCase = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _UpperCamelCase = self.get_inputs() _UpperCamelCase = pipe(**__UpperCamelCase ) _UpperCamelCase = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def _UpperCamelCase ( self : List[str] ) -> Tuple: _UpperCamelCase = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 _UpperCamelCase = inputs["""image"""].resize((504, 504) ) _UpperCamelCase = """timbrooks/instruct-pix2pix""" _UpperCamelCase = StableDiffusionInstructPixaPixPipeline.from_pretrained( __UpperCamelCase , safety_checker=__UpperCamelCase , ) pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) pipe.enable_attention_slicing() _UpperCamelCase = pipe(**__UpperCamelCase ) _UpperCamelCase = output.images[0] _UpperCamelCase = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) _UpperCamelCase = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 
5E-3
256
'''simple docstring'''

from __future__ import annotations

import unittest

import numpy as np

from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask


if is_tf_available():
    import tensorflow as tf

    from transformers import TFBlipTextModel
    from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST


class BlipTextModelTester:
    def __init__(
        self,
        parent,
        batch_size=12,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        projection_dim=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        dropout=0.1,
        attention_dropout=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        bos_token_id=0,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = scope
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        if input_mask is not None:
            input_mask = input_mask.numpy()
            batch_size, seq_length = input_mask.shape
            rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
            for batch_idx, start_index in enumerate(rnd_start_indices):
                input_mask[batch_idx, :start_index] = 1
                input_mask[batch_idx, start_index:] = 0

        config = self.get_config()

        return config, input_ids, tf.convert_to_tensor(input_mask)

    def get_config(self):
        return BlipTextConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            projection_dim=self.projection_dim,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            dropout=self.dropout,
            attention_dropout=self.attention_dropout,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            bos_token_id=self.bos_token_id,
        )

    def create_and_check_model(self, config, input_ids, input_mask):
        model = TFBlipTextModel(config=config)
        result = model(input_ids, attention_mask=input_mask, training=False)
        result = model(input_ids, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, input_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_tf
class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (TFBlipTextModel,) if is_tf_available() else ()
    test_onnx = False
    test_pruning = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = BlipTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BlipTextConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_training(self):
        pass

    def test_training_gradient_checkpointing(self):
        pass

    @unittest.skip(reason="Blip does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING")
    def test_save_load_fast_init_to_base(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFBlipTextModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
265
0
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class FlaxXLMRobertaModelIntegrationTest(unittest.TestCase):
    '''simple docstring'''

    @slow
    def test_flax_xlm_roberta_base(self):
        model = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base")
        tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
        text = "The dog is cute and lives in the garden house"
        input_ids = jnp.array([tokenizer.encode(text)])
        expected_output_shape = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        expected_output_values_last_dim = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]]
        )
        output = model(input_ids)["last_hidden_state"]
        self.assertEqual(output.shape, expected_output_shape)
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1], expected_output_values_last_dim, atol=1e-3))
262
'''simple docstring'''

import contextlib
import importlib
import io
import unittest

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works regardless of the class name
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
265
0
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res = _run_operation(my, fun, *args)
        py_res = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
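# For readers unfamiliar with the structure under test, a minimal direct-use
# sketch of `HashMap` follows; it relies only on the dict-style protocol the
# parametrized test exercises (setitem, getitem, delitem, len).
from data_structures.hashing.hash_map import HashMap

hm = HashMap(initial_block_size=4)
hm["key_a"] = "val_a"          # insert
hm["key_a"] = "val_b"          # overwrite in place
assert hm["key_a"] == "val_b"
del hm["key_a"]                # delete
assert len(hm) == 0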
133
'''simple docstring'''

from itertools import count


def solution(min_block_length: int = 50) -> int:
    # For each row length n, count the ways to fill the row with blocks of at
    # least `min_block_length` separated by at least one empty square, and
    # return the least n for which the count first exceeds one million.
    fill_count_functions = [1] * min_block_length

    for n in count(min_block_length):
        fill_count_functions.append(1)

        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]

            fill_count_functions[n] += 1

        if fill_count_functions[n] > 1_000_000:
            break

    return n


if __name__ == "__main__":
    print(f"{solution() = }")
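# Sanity check taken from the Project Euler 115 statement: with a minimum
# block length of 3, the fill-count function first exceeds one million at a
# row length of 30, so the solver should report 30.
assert solution(3) == 30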
265
0
'''simple docstring'''


def remove_digit(num: int) -> int:
    """Return the largest number that can be made by removing exactly one digit."""
    if not isinstance(num, int):
        raise TypeError("only integers accepted as input")
    num_str = str(abs(num))
    num_transpositions = [list(num_str) for _ in num_str]
    for index in range(len(num_str)):
        num_transpositions[index].pop(index)
    return max(int("".join(transposition)) for transposition in num_transpositions)


if __name__ == "__main__":
    __import__("doctest").testmod()
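# Worked example: removing one digit from 152 leaves 52, 12 or 15, and the
# helper returns the largest candidate; the sign is dropped via abs() first.
assert remove_digit(152) == 52
assert remove_digit(-2736) == 736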
234
'''simple docstring'''

from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        # apply any pending lazy value and push it one level down
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True

        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)])
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        # resolve any pending lazy value before answering the query
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
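# Illustrative cross-check of the demo above: a lazy range-max query must
# agree with a brute-force max over the same 1-indexed, inclusive range.
A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
assert max(A[4 - 1 : 6]) == 7  # same elements as segt.query(1, 1, size, 4, 6)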
265
0
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip A_ : str = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def A ( snake_case__ ): '''simple docstring''' if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def A ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' return max(metric_fn(_lowercase , _lowercase ) for gt in ground_truths ) def A ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_lowercase , """r""" ).readlines()] SCREAMING_SNAKE_CASE__ = [] if args.gold_data_mode == "qa": SCREAMING_SNAKE_CASE__ = pd.read_csv(_lowercase , sep="""\t""" , header=_lowercase ) for answer_list in data[1]: SCREAMING_SNAKE_CASE__ = ast.literal_eval(_lowercase ) answers.append(_lowercase ) else: SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_lowercase , """r""" ).readlines()] SCREAMING_SNAKE_CASE__ = [[reference] for reference in references] SCREAMING_SNAKE_CASE__ = 0 for prediction, ground_truths in zip(_lowercase , _lowercase ): total += 1 em += metric_max_over_ground_truths(_lowercase , _lowercase , _lowercase ) fa += metric_max_over_ground_truths(_lowercase , _lowercase , _lowercase ) SCREAMING_SNAKE_CASE__ = 1_00.0 * em / total SCREAMING_SNAKE_CASE__ = 1_00.0 * fa / total logger.info(f"""F1: {fa:.2f}""" ) logger.info(f"""EM: {em:.2f}""" ) def A ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = args.k SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_lowercase , """r""" ).readlines()] SCREAMING_SNAKE_CASE__ = [line.strip() for line in open(_lowercase , """r""" ).readlines()] SCREAMING_SNAKE_CASE__ = 0 for hypo, reference in zip(_lowercase , _lowercase ): SCREAMING_SNAKE_CASE__ = set(hypo.split("""\t""" )[:k] ) SCREAMING_SNAKE_CASE__ = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k SCREAMING_SNAKE_CASE__ = 1_00.0 * em / total logger.info(f"""Precision@{k}: {em: .2f}""" ) def A ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' def strip_title(snake_case__ ): if title.startswith("""\"""" ): SCREAMING_SNAKE_CASE__ = title[1:] if title.endswith("""\"""" ): SCREAMING_SNAKE_CASE__ = title[:-1] return title SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _lowercase , return_tensors="""pt""" , padding=_lowercase , truncation=_lowercase , )["""input_ids"""].to(args.device ) SCREAMING_SNAKE_CASE__ = rag_model.rag.question_encoder(_lowercase ) SCREAMING_SNAKE_CASE__ = question_enc_outputs[0] SCREAMING_SNAKE_CASE__ = rag_model.retriever( _lowercase , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) SCREAMING_SNAKE_CASE__ = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) SCREAMING_SNAKE_CASE__ = [] for docs in all_docs: 
SCREAMING_SNAKE_CASE__ = [strip_title(_lowercase ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(_lowercase ) ) return provenance_strings def A ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' with torch.no_grad(): SCREAMING_SNAKE_CASE__ = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( _lowercase , return_tensors="""pt""" , padding=_lowercase , truncation=_lowercase ) SCREAMING_SNAKE_CASE__ = inputs_dict.input_ids.to(args.device ) SCREAMING_SNAKE_CASE__ = inputs_dict.attention_mask.to(args.device ) SCREAMING_SNAKE_CASE__ = rag_model.generate( # rag_model overwrites generate _lowercase , attention_mask=_lowercase , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=_lowercase , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) SCREAMING_SNAKE_CASE__ = rag_model.retriever.generator_tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase ) if args.print_predictions: for q, a in zip(_lowercase , _lowercase ): logger.info("""Q: {} - A: {}""".format(_lowercase , _lowercase ) ) return answers def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=_lowercase , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=_lowercase , choices=["""exact""", """compressed""", """legacy"""] , type=_lowercase , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=_lowercase , type=_lowercase , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=_lowercase , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=_lowercase , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=_lowercase , help="""k for the precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=_lowercase , type=_lowercase , required=_lowercase , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=_lowercase , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=_lowercase , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , 
type=_lowercase , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=_lowercase , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=_lowercase , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=_lowercase , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) SCREAMING_SNAKE_CASE__ = parser.parse_args() SCREAMING_SNAKE_CASE__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = {} if args.model_type is None: SCREAMING_SNAKE_CASE__ = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): SCREAMING_SNAKE_CASE__ = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration SCREAMING_SNAKE_CASE__ = args.n_docs if args.index_name is not None: SCREAMING_SNAKE_CASE__ = args.index_name if args.index_path is not None: SCREAMING_SNAKE_CASE__ = args.index_path else: SCREAMING_SNAKE_CASE__ = BartForConditionalGeneration SCREAMING_SNAKE_CASE__ = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , _lowercase ) SCREAMING_SNAKE_CASE__ = get_scores if args.eval_mode == """e2e""" else get_precision_at_k SCREAMING_SNAKE_CASE__ = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(_lowercase , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(_lowercase ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): SCREAMING_SNAKE_CASE__ = RagRetriever.from_pretrained(_lowercase , **_lowercase ) SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_lowercase , retriever=_lowercase , **_lowercase ) model.retriever.init_retrieval() else: SCREAMING_SNAKE_CASE__ = model_class.from_pretrained(_lowercase , **_lowercase ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: SCREAMING_SNAKE_CASE__ = [] for line in tqdm(_lowercase ): questions.append(line.strip() ) if len(_lowercase ) == args.eval_batch_size: SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_lowercase , _lowercase , _lowercase ) preds_file.write("""\n""".join(_lowercase ) + """\n""" ) preds_file.flush() SCREAMING_SNAKE_CASE__ = [] if len(_lowercase ) > 0: SCREAMING_SNAKE_CASE__ = evaluate_batch_fn(_lowercase , _lowercase , _lowercase ) preds_file.write("""\n""".join(_lowercase ) ) preds_file.flush() 
score_fn(_lowercase , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": A_ : Optional[int] = get_args() main(args)
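# A sketch of a typical invocation of the RAG evaluation script above. The
# model identifier and file paths are placeholders, but every flag shown is
# defined by the script's own argument parser.
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-sequence-nq \
#       --model_type rag_sequence \
#       --eval_mode e2e \
#       --gold_data_mode qa \
#       --n_docs 5 \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data \
#       --predictions_path path/to/predictions.txt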
165
'''simple docstring'''

from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL Image to a given level."""

    def brightness(c: int) -> float:
        # fundamental brightness transform applied to every channel value
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
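# Note on the transform above: 128 + level + (c - 128) simplifies to c + level,
# i.e. a uniform shift of every channel value, with the 8-bit result clamped by
# PIL when the point table is built.
assert 128 + 100 + (50 - 128) == 50 + 100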
265
0
import os
from collections.abc import Iterator


def good_file_paths(top_dir: str = ".") -> Iterator[str]:
    for dir_path, dir_names, filenames in os.walk(top_dir):
        dir_names[:] = [d for d in dir_names if d != "scripts" and d[0] not in "._"]
        for filename in filenames:
            if filename == "__init__.py":
                continue
            if os.path.splitext(filename)[1] in (".py", ".ipynb"):
                yield os.path.join(dir_path, filename).lstrip("./")


def md_prefix(i: int) -> str:
    return f"{i * '  '}*" if i else "\n##"


def print_path(old_path: str, new_path: str) -> str:
    old_parts = old_path.split(os.sep)
    for i, new_part in enumerate(new_path.split(os.sep)):
        if (i + 1 > len(old_parts) or old_parts[i] != new_part) and new_part:
            print(f"{md_prefix(i)} {new_part.replace('_', ' ').title()}")
    return new_path


def print_directory_md(top_dir: str = ".") -> None:
    old_path = ""
    for filepath in sorted(good_file_paths(top_dir)):
        filepath, filename = os.path.split(filepath)
        if filepath != old_path:
            old_path = print_path(old_path, filepath)
        indent = (filepath.count(os.sep) + 1) if filepath else 0
        url = f"{filepath}/{filename}".replace(" ", "%20")
        filename = os.path.splitext(filename.replace("_", " ").title())[0]
        print(f"{md_prefix(indent)} [{filename}]({url})")


if __name__ == "__main__":
    print_directory_md(".")
30
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
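# The parameterized scaling test above builds a `rope_scaling` configuration;
# a minimal sketch of that config outside the test harness follows. The
# {"type", "factor"} dict is the transformers RoPE-scaling interface this test
# appears to target; treat it as an assumption, not part of the sample.
from transformers import GPTNeoXConfig

config = GPTNeoXConfig(rope_scaling={"type": "linear", "factor": 10.0})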
265
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : Any = { """configuration_roberta""": ["""ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RobertaConfig""", """RobertaOnnxConfig"""], """tokenization_roberta""": ["""RobertaTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Dict = ["""RobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ """ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """RobertaForCausalLM""", """RobertaForMaskedLM""", """RobertaForMultipleChoice""", """RobertaForQuestionAnswering""", """RobertaForSequenceClassification""", """RobertaForTokenClassification""", """RobertaModel""", """RobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Dict = [ """TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFRobertaForCausalLM""", """TFRobertaForMaskedLM""", """TFRobertaForMultipleChoice""", """TFRobertaForQuestionAnswering""", """TFRobertaForSequenceClassification""", """TFRobertaForTokenClassification""", """TFRobertaMainLayer""", """TFRobertaModel""", """TFRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : List[str] = [ """FlaxRobertaForCausalLM""", """FlaxRobertaForMaskedLM""", """FlaxRobertaForMultipleChoice""", """FlaxRobertaForQuestionAnswering""", """FlaxRobertaForSequenceClassification""", """FlaxRobertaForTokenClassification""", """FlaxRobertaModel""", """FlaxRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig from .tokenization_roberta import RobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roberta_fast import RobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, RobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roberta import ( FlaxRobertaForCausalLM, FlaxRobertaForMaskedLM, FlaxRobertaForMultipleChoice, FlaxRobertaForQuestionAnswering, FlaxRobertaForSequenceClassification, FlaxRobertaForTokenClassification, FlaxRobertaModel, 
FlaxRobertaPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
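# Usage note: the `_LazyModule` assignment above defers the heavy framework
# imports until a name is first touched; callers are unaffected and import from
# the package root as usual (a minimal sketch, assuming torch is installed).
from transformers import RobertaConfig, RobertaModel, RobertaTokenizer

model = RobertaModel.from_pretrained("roberta-base")  # triggers the lazy import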
347
'''simple docstring'''


def actual_power(a: int, b: int) -> int:
    """Divide-and-conquer exponentiation: computes a**b for integer a and b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
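# Quick check of the divide-and-conquer power against Python's built-in
# exponentiation, including the negative-exponent branch.
assert power(2, 10) == 2**10
assert power(-2, -3) == (-2) ** -3  # == -0.125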
265
0
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase__ ( unittest.TestCase): '''simple docstring''' def __init__( self :Union[str, Any] , a :Optional[int] , a :int=7 , a :Tuple=3 , a :int=3_0 , a :Any=4_0_0 , a :Optional[int]=True , a :Any=None , a :Optional[int]=True , a :List[str]=[0.5, 0.5, 0.5] , a :Optional[Any]=[0.5, 0.5, 0.5] , a :Optional[int]=True , a :List[str]=1 / 2_5_5 , a :Any=True , ) -> Optional[Any]: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __UpperCamelCase : Optional[int] = size if size is not None else {"""shortest_edge""": 1_8, """longest_edge""": 1_3_3_3} __UpperCamelCase : Optional[Any] = parent __UpperCamelCase : Optional[Any] = batch_size __UpperCamelCase : Any = num_channels __UpperCamelCase : int = min_resolution __UpperCamelCase : List[Any] = max_resolution __UpperCamelCase : int = do_resize __UpperCamelCase : Union[str, Any] = size __UpperCamelCase : Any = do_normalize __UpperCamelCase : str = image_mean __UpperCamelCase : Any = image_std __UpperCamelCase : Union[str, Any] = do_rescale __UpperCamelCase : List[str] = rescale_factor __UpperCamelCase : List[Any] = do_pad def _lowerCamelCase ( self :Optional[Any] ) -> List[str]: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def _lowerCamelCase ( self :int , a :List[Any] , a :Any=False ) -> Dict: if not batched: __UpperCamelCase : str = image_inputs[0] if isinstance(a , Image.Image ): __UpperCamelCase : Tuple = image.size else: __UpperCamelCase : str = image.shape[1], image.shape[2] if w < h: __UpperCamelCase : Tuple = int(self.size["shortest_edge"] * h / w ) __UpperCamelCase : Any = self.size["""shortest_edge"""] elif w > h: __UpperCamelCase : Tuple = self.size["""shortest_edge"""] __UpperCamelCase : int = int(self.size["shortest_edge"] * w / h ) else: __UpperCamelCase : Any = self.size["""shortest_edge"""] __UpperCamelCase : Optional[int] = self.size["""shortest_edge"""] else: __UpperCamelCase : int = [] for image in image_inputs: __UpperCamelCase : Union[str, Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCamelCase : List[Any] = max(a , key=lambda a : item[0] )[0] __UpperCamelCase : Dict = max(a , key=lambda a : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase__ ( __lowercase , unittest.TestCase): '''simple docstring''' _A = DetaImageProcessor if is_vision_available() else None def _lowerCamelCase ( self :Any ) -> Any: __UpperCamelCase : str = DetaImageProcessingTester(self ) @property def _lowerCamelCase ( self :Optional[Any] ) -> Optional[int]: return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self :Any ) -> List[str]: __UpperCamelCase : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(a , "image_mean" ) ) self.assertTrue(hasattr(a , "image_std" ) ) self.assertTrue(hasattr(a , 
"do_normalize" ) ) self.assertTrue(hasattr(a , "do_resize" ) ) self.assertTrue(hasattr(a , "do_rescale" ) ) self.assertTrue(hasattr(a , "do_pad" ) ) self.assertTrue(hasattr(a , "size" ) ) def _lowerCamelCase ( self :str ) -> Tuple: __UpperCamelCase : str = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} ) self.assertEqual(image_processor.do_pad , a ) def _lowerCamelCase ( self :Optional[Any] ) -> int: pass def _lowerCamelCase ( self :int ) -> Dict: # Initialize image_processing __UpperCamelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a ) for image in image_inputs: self.assertIsInstance(a , Image.Image ) # Test not batched input __UpperCamelCase : Optional[int] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase : Any = self.image_processor_tester.get_expected_values(a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : Dict = self.image_processor_tester.get_expected_values(a , batched=a ) __UpperCamelCase : Union[str, Any] = image_processing(a , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowerCamelCase ( self :Optional[int] ) -> List[Any]: # Initialize image_processing __UpperCamelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCamelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a ) for image in image_inputs: self.assertIsInstance(a , np.ndarray ) # Test not batched input __UpperCamelCase : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase : List[Any] = self.image_processor_tester.get_expected_values(a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : List[str] = image_processing(a , return_tensors="pt" ).pixel_values __UpperCamelCase : List[Any] = self.image_processor_tester.get_expected_values(a , batched=a ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowerCamelCase ( self :Tuple ) -> str: # Initialize image_processing __UpperCamelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCamelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a ) for image in image_inputs: self.assertIsInstance(a , torch.Tensor ) # Test not batched input __UpperCamelCase : List[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values __UpperCamelCase : int = self.image_processor_tester.get_expected_values(a ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCamelCase : Dict = image_processing(a , return_tensors="pt" ).pixel_values __UpperCamelCase : Dict = self.image_processor_tester.get_expected_values(a , batched=a ) self.assertEqual( 
encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def _lowerCamelCase ( self :List[Any] ) -> List[str]: # prepare image and target __UpperCamelCase : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: __UpperCamelCase : str = json.loads(f.read() ) __UpperCamelCase : List[str] = {"""image_id""": 3_9_7_6_9, """annotations""": target} # encode them __UpperCamelCase : Union[str, Any] = DetaImageProcessor() __UpperCamelCase : Tuple = image_processing(images=a , annotations=a , return_tensors="pt" ) # verify pixel values __UpperCamelCase : List[str] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , a ) __UpperCamelCase : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1E-4 ) ) # verify area __UpperCamelCase : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) ) # verify boxes __UpperCamelCase : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , a ) __UpperCamelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1E-3 ) ) # verify image_id __UpperCamelCase : Tuple = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) ) # verify is_crowd __UpperCamelCase : List[str] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) ) # verify class_labels __UpperCamelCase : Dict = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) ) # verify orig_size __UpperCamelCase : int = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) ) # verify size __UpperCamelCase : Dict = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) ) @slow def _lowerCamelCase ( self :Optional[Any] ) -> int: # prepare image, target and masks_path __UpperCamelCase : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: __UpperCamelCase : Any = json.loads(f.read() ) __UpperCamelCase : int = {"""file_name""": """000000039769.png""", """image_id""": 3_9_7_6_9, """segments_info""": target} __UpperCamelCase : Union[str, Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them __UpperCamelCase : Optional[Any] = DetaImageProcessor(format="coco_panoptic" ) __UpperCamelCase : Union[str, Any] = image_processing(images=a , annotations=a , masks_path=a , return_tensors="pt" ) # verify pixel values __UpperCamelCase : Union[str, Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] ) self.assertEqual(encoding["pixel_values"].shape , a ) __UpperCamelCase : Union[str, Any] = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , a , atol=1E-4 ) ) # verify area __UpperCamelCase : int = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 
5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , a ) ) # verify boxes __UpperCamelCase : List[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , a ) __UpperCamelCase : Tuple = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , a , atol=1E-3 ) ) # verify image_id __UpperCamelCase : Dict = torch.tensor([3_9_7_6_9] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , a ) ) # verify is_crowd __UpperCamelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , a ) ) # verify class_labels __UpperCamelCase : List[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , a ) ) # verify masks __UpperCamelCase : Any = 8_2_2_8_7_3 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , a ) # verify orig_size __UpperCamelCase : Any = torch.tensor([4_8_0, 6_4_0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , a ) ) # verify size __UpperCamelCase : Any = torch.tensor([8_0_0, 1_0_6_6] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , a ) )
232
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=UpperCAmelCase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
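# Quick end-to-end check of the tokenizer exercised by the test above (an
# illustrative sketch, not part of the test file; downloads the public
# albert-base-v2 vocabulary):
from transformers import AlbertTokenizer

tok = AlbertTokenizer.from_pretrained("albert-base-v2")
ids = tok.encode("I was born in 92000, and this is falsé.")
assert ids[0] == tok.cls_token_id and ids[-1] == tok.sep_token_id
print(tok.convert_ids_to_tokens(ids))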
from math import factorial


class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank

    def __repr__(self):
        return (
            f"{self.real}+"
            f"{'+'.join(str(dual) + 'E' + str(n + 1) for n, dual in enumerate(self.duals))}"
        )

    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)

    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        # Missing higher-order coefficients are zero, so pad with 0
        # (the original padded with 1, which corrupts the longer operand).
        if len(s_dual) > len(o_dual):
            o_dual.extend([0] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([0] * (len(o_dual) - len(s_dual)))
        new_duals = [s + o for s, o in zip(s_dual, o_dual)]
        return Dual(self.real + other.real, new_duals)

    __radd__ = __add__

    def __sub__(self, other):
        return self + other * -1

    def __mul__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real * other, [dual * other for dual in self.duals])
        # Convolve the dual coefficients, then add the cross terms with the real parts.
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)

    __rmul__ = __mul__

    def __truediv__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real / other, [dual / other for dual in self.duals])
        raise ValueError

    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real // other, [dual // other for dual in self.duals])
        raise ValueError

    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("power must be a positive integer")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x


def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("differentiate() requires a function as input for func")
    if not isinstance(position, (float, int)):
        raise ValueError("differentiate() requires a float as input for position")
    if not isinstance(order, int):
        raise ValueError("differentiate() requires an int as input for order")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    def f(y):
        return y**2 * y**4

    print(differentiate(f, 9, 2))
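# Sanity check of the dual-number differentiator above (a short sketch, not
# part of the original file): the 2nd derivative of y**6 is 30*y**4, so at
# y = 9 we expect 30 * 9**4 = 196830.
assert differentiate(lambda y: y**6, 9.0, 2) == 30 * 9.0**4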
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
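# The three DiffEdit stages exercised by the slow tests above, in plain form
# (an illustrative sketch, not part of the test file; needs a CUDA GPU and
# downloads the stabilityai/stable-diffusion-2-1 weights):
import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

raw_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).resize((768, 768))

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

# 1) mask from the prompt difference, 2) DDIM inversion, 3) masked denoising
mask = pipe.generate_mask(image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image, inpaint_strength=0.7).latents
image = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7).images[0]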
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False):
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True):
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
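# Illustrative use of the helpers above (a sketch, not part of the original
# module): gate a test on faiss being installed, and simulate a dead network
# inside the test body.
@require_faiss
def test_lookup_without_network():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.ConnectionError):
            requests.get("https://huggingface.co")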
import argparse
import json
import os

import torch
from torch import nn

from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME


def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "encoder.embed_positions._float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def rename_fairseq_keys(state_dict, expert_idx=None):
    new_dict = {}
    for old_key in state_dict.keys():
        key = old_key
        if "moe_layer.experts." in key:
            if expert_idx is not None:
                key = key.replace("moe_layer.experts.0", f"ffn.experts.expert_{expert_idx}")
            else:
                key = key.replace("moe_layer.experts.", "ffn.experts.expert_")
        if "gate" in key:
            key = key.replace(".moe_layer.gate.wg", ".ffn.router.classifier")
        # NOTE: the original tested `if "fc2" and "experts" not in key`, which is
        # always truthy for the first operand; the intent is to match keys
        # containing "fc2"/"fc1".
        if "fc2" in key and "experts" not in key:
            key = key.replace(".fc2.", ".ffn.fc2.")
        if "fc1" in key and "experts" not in key:
            key = key.replace(".fc1.", ".ffn.fc1.")
        if ".encoder_attn." in key:
            key = key.replace(".encoder_attn.", ".cross_attention.")
        if "encoder_attn_layer_norm" in key:
            key = key.replace("encoder_attn_layer_norm", "cross_attention_layer_norm")
        if "final_layer_norm" in key:
            key = key.replace("final_layer_norm", "ff_layer_norm")
        new_dict[key] = state_dict[old_key]
    return new_dict


def shard_on_the_fly(switch_checkpoint_path, dump_path, num_experts, dtype, weights_name=WEIGHTS_NAME):
    sharded_state_dicts = []
    total_size = 0
    os.makedirs(dump_path, exist_ok=True)

    for expert in range(num_experts):
        expert_path = switch_checkpoint_path + f"-rank-{expert}.pt"
        if os.path.isfile(expert_path):
            expert_state = torch.load(expert_path)["model"]
            remove_ignore_keys_(expert_state)
            expert_state = rename_fairseq_keys(expert_state, expert)
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            torch.save(expert_state, save_path)
            sharded_state_dicts.append(expert_state.keys())
            total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
                expert_state[list(expert_state)[0]].dtype
            )

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    shared_weights = torch.load(switch_checkpoint_path + "-shared.pt")["model"]
    remove_ignore_keys_(shared_weights)
    shared_weights = rename_fairseq_keys(shared_weights, None)
    shared_weights["shared.weight"] = shared_weights["decoder.embed_tokens.weight"]
    sharded_state_dicts.append(shared_weights.keys())

    # If we only have the shared weights (dummy model/experts saved on the same file)
    if len(sharded_state_dicts) == 1:
        save_path = os.path.join(dump_path, weights_name)
        torch.save(shared_weights, save_path)
        return {weights_name: sharded_state_dicts[0]}, None
    else:
        torch.save(shared_weights, save_path)
    # Otherwise, let's build the index
    weight_map = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--nllb_moe_checkpoint_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    metadata, index = shard_on_the_fly(
        args.nllb_moe_checkpoint_path,
        args.pytorch_dump_folder_path,
        128,
        args.dtype,
    )

    config = NllbMoeConfig.from_pretrained(
        "facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
    )
    config.save_pretrained(args.pytorch_dump_folder_path)
    model = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
    print("Done")
    model.save_pretrained(args.pytorch_dump_folder_path)
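# After conversion, the shard index the script writes can be inspected like
# this (a sketch; `dump_dir` stands in for --pytorch_dump_folder_path):
import json
import os

from transformers.utils import WEIGHTS_INDEX_NAME

dump_dir = "hf-converted-moe-54b"
with open(os.path.join(dump_dir, WEIGHTS_INDEX_NAME)) as f:
    index = json.load(f)
print(index["metadata"]["total_size"], "bytes across", len(set(index["weight_map"].values())), "shards")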
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
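# Minimal usage sketch of the config above through the public API (assumes a
# transformers release that ships FocalNet; the model is randomly initialized):
from transformers import FocalNetConfig, FocalNetModel

config = FocalNetConfig(embed_dim=96, depths=[2, 2, 6, 2], out_features=["stage2", "stage4"])
model = FocalNetModel(config)
print(config.stage_names)  # ['stem', 'stage1', 'stage2', 'stage3', 'stage4']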
import argparse
import json
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SwiftFormerConfig,
    SwiftFormerForImageClassification,
    ViTImageProcessor,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

device = torch.device("cpu")


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def get_expected_output(swiftformer_name):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703e00, 2.1107e00, -2.0811e00, 8.8685e-01, 2.4360e-01])
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636e-01, 2.3478e-01, -1.6963e00, -1.7381e00, -8.6337e-01])
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768e-01, -4.7429e-01, -1.0897e00, -1.0248e00, 3.5523e-02])
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330e-01, 2.4211e-01, -6.0185e-01, -8.2789e-01, -6.0446e-02])


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def create_rename_keys(state_dict):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv", ".point_wise_conv")
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv", ".depth_wise_conv")
        if ".Proj." in k:
            k_new = k_new.replace(".Proj.", ".proj.")
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
        if "network" in k_new:
            ls = k_new.split(".")
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:])
            else:
                k_new = k_new.replace("network", "swiftformer.encoder.network")
        rename_keys.append((k, k_new))
    return rename_keys


@torch.no_grad()
def convert_swiftformer_checkpoint(swiftformer_name, pytorch_dump_folder_path, original_ckpt):
    config = SwiftFormerConfig()

    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    config.num_labels = 1000
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # size of the architecture
    if swiftformer_name == "swiftformer_xs":
        config.depths = [3, 3, 6, 4]
        config.embed_dims = [48, 56, 112, 220]
    elif swiftformer_name == "swiftformer_s":
        config.depths = [3, 3, 9, 6]
        config.embed_dims = [48, 64, 168, 224]
    elif swiftformer_name == "swiftformer_l1":
        config.depths = [4, 3, 10, 5]
        config.embed_dims = [48, 96, 192, 384]
    elif swiftformer_name == "swiftformer_l3":
        config.depths = [4, 4, 12, 6]
        config.embed_dims = [64, 128, 320, 512]

    # load state_dict of original model, remove and rename some keys
    if original_ckpt:
        if original_ckpt.startswith("https"):
            checkpoint = torch.hub.load_state_dict_from_url(original_ckpt, map_location="cpu", check_hash=True)
        else:
            checkpoint = torch.load(original_ckpt, map_location="cpu")
    state_dict = checkpoint

    rename_keys = create_rename_keys(state_dict)
    for rename_key_src, rename_key_dest in rename_keys:
        rename_key(state_dict, rename_key_src, rename_key_dest)

    # load HuggingFace model
    hf_model = SwiftFormerForImageClassification(config).eval()
    hf_model.load_state_dict(state_dict)

    # prepare test inputs
    image = prepare_img()
    processor = ViTImageProcessor.from_pretrained("preprocessor_config")
    inputs = processor(images=image, return_tensors="pt")

    # compare outputs from both models
    timm_logits = get_expected_output(swiftformer_name)
    hf_logits = hf_model(inputs["pixel_values"]).logits

    assert hf_logits.shape == torch.Size([1, 1000])
    assert torch.allclose(hf_logits[0, 0:5], timm_logits, atol=1e-3)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {swiftformer_name} to {pytorch_dump_folder_path}")
    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--swiftformer_name",
        default="swiftformer_xs",
        choices=["swiftformer_xs", "swiftformer_s", "swiftformer_l1", "swiftformer_l3"],
        type=str,
        help="Name of the SwiftFormer model you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="./converted_outputs/",
        type=str,
        help="Path to the output PyTorch model directory.",
    )
    parser.add_argument("--original_ckpt", default=None, type=str, help="Path to the original model checkpoint.")

    args = parser.parse_args()
    convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
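# Typical invocation of the conversion script above (shown as a comment; the
# script filename and checkpoint path are placeholders, not from the source):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt /path/to/swiftformer_xs_checkpoint.pth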
"""simple docstring""" import argparse import logging import os import time import timeit import datasets import numpy as np import pycuda.autoinit # noqa: F401 import pycuda.driver as cuda import tensorrt as trt import torch from absl import logging as absl_logging from accelerate import Accelerator from datasets import load_dataset, load_metric from torch.utils.data import DataLoader from utils_qa import postprocess_qa_predictions import transformers from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed from transformers.trainer_pt_utils import nested_concat, nested_truncate UpperCAmelCase = trt.Logger(trt.Logger.WARNING) UpperCAmelCase = absl_logging.get_absl_logger() absl_logger.setLevel(logging.WARNING) UpperCAmelCase = logging.getLogger(__name__) UpperCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( """--onnx_model_path""", default=None, type=str, required=True, help="""Path to ONNX model: """, ) parser.add_argument( """--output_dir""", default=None, type=str, required=True, help="""The output directory where the model checkpoints and predictions will be written.""", ) # Other parameters parser.add_argument( """--tokenizer_name""", default="""""", type=str, required=True, help="""Pretrained tokenizer name or path if not the same as model_name""", ) parser.add_argument( """--version_2_with_negative""", action="""store_true""", help="""If true, the SQuAD examples contain some that do not have an answer.""", ) parser.add_argument( """--null_score_diff_threshold""", type=float, default=0.0, help="""If null_score - best_non_null is greater than the threshold predict null.""", ) parser.add_argument( """--max_seq_length""", default=384, type=int, help=( """The maximum total input sequence length after WordPiece tokenization. Sequences """ """longer than this will be truncated, and sequences shorter than this will be padded.""" ), ) parser.add_argument( """--doc_stride""", default=128, type=int, help="""When splitting up a long document into chunks, how much stride to take between chunks.""", ) parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""") parser.add_argument( """--n_best_size""", default=20, type=int, help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""", ) parser.add_argument( """--max_answer_length""", default=30, type=int, help=( """The maximum length of an answer that can be generated. 
This is needed because the start """ """and end predictions are not conditioned on one another.""" ), ) parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""") parser.add_argument( """--dataset_name""", type=str, default=None, required=True, help="""The name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--dataset_config_name""", type=str, default=None, help="""The configuration name of the dataset to use (via the datasets library).""", ) parser.add_argument( """--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data.""" ) parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""") parser.add_argument( """--fp16""", action="""store_true""", help="""Whether to use 16-bit (mixed) precision instead of 32-bit""", ) parser.add_argument( """--int8""", action="""store_true""", help="""Whether to use INT8""", ) UpperCAmelCase = parser.parse_args() if args.tokenizer_name: UpperCAmelCase = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True) else: raise ValueError( """You are instantiating a new tokenizer from scratch. This is not supported by this script.""" """You can do it from another script, save it, and load it from here, using --tokenizer_name.""" ) logger.info("""Training/evaluation parameters %s""", args) UpperCAmelCase = args.per_device_eval_batch_size UpperCAmelCase = (args.eval_batch_size, args.max_seq_length) # TRT Engine properties UpperCAmelCase = True UpperCAmelCase = """temp_engine/bert-fp32.engine""" if args.fpaa: UpperCAmelCase = """temp_engine/bert-fp16.engine""" if args.inta: UpperCAmelCase = """temp_engine/bert-int8.engine""" # import ONNX file if not os.path.exists("""temp_engine"""): os.makedirs("""temp_engine""") UpperCAmelCase = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser( network, TRT_LOGGER ) as parser: with open(args.onnx_model_path, """rb""") as model: if not parser.parse(model.read()): for error in range(parser.num_errors): print(parser.get_error(error)) # Query input names and shapes from parsed TensorRT network UpperCAmelCase = [network.get_input(i) for i in range(network.num_inputs)] UpperCAmelCase = [_input.name for _input in network_inputs] # ex: ["actual_input1"] with builder.create_builder_config() as config: UpperCAmelCase = 1 << 50 if STRICT_TYPES: config.set_flag(trt.BuilderFlag.STRICT_TYPES) if args.fpaa: config.set_flag(trt.BuilderFlag.FPaa) if args.inta: config.set_flag(trt.BuilderFlag.INTa) UpperCAmelCase = builder.create_optimization_profile() config.add_optimization_profile(profile) for i in range(len(input_names)): profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE) UpperCAmelCase = builder.build_engine(network, config) # serialize_engine and store in file (can be directly loaded and deserialized): with open(engine_name, """wb""") as f: f.write(engine.serialize()) def lowercase ( a__ : Any , a__ : Optional[int] , a__ : Tuple , a__ : Any , a__ : Any , a__ : List[Any] , a__ : Dict , a__ : Union[str, Any] ) -> Optional[int]: _UpperCamelCase = np.asarray(inputs['''input_ids'''] , dtype=np.intaa ) _UpperCamelCase = np.asarray(inputs['''attention_mask'''] , dtype=np.intaa ) _UpperCamelCase = np.asarray(inputs['''token_type_ids'''] , dtype=np.intaa ) # Copy inputs cuda.memcpy_htod_async(d_inputs[0] , 
input_ids.ravel() , _lowercase ) cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , _lowercase ) cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , _lowercase ) # start time _UpperCamelCase = time.time() # Run inference context.execute_async( bindings=[int(_lowercase ) for d_inp in d_inputs] + [int(_lowercase ), int(_lowercase )] , stream_handle=stream.handle ) # Transfer predictions back from GPU cuda.memcpy_dtoh_async(_lowercase , _lowercase , _lowercase ) cuda.memcpy_dtoh_async(_lowercase , _lowercase , _lowercase ) # Synchronize the stream and take time stream.synchronize() # end time _UpperCamelCase = time.time() _UpperCamelCase = end_time - start_time _UpperCamelCase = (h_outputa, h_outputa) # print(outputs) return outputs, infer_time # Initialize the accelerator. We will let the accelerator handle device placement for us in this example. UpperCAmelCase = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO, ) # Setup logging, we only want one process per machine to log things on the screen. # accelerator.is_local_main_process is only True for one process per machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). if args.dataset_name is not None: # Downloading and loading a dataset from the hub. UpperCAmelCase = load_dataset(args.dataset_name, args.dataset_config_name) else: raise ValueError("""Evaluation requires a dataset name""") # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. # Preprocessing the datasets. # Preprocessing is slighlty different for training and evaluation. UpperCAmelCase = raw_datasets["""validation"""].column_names UpperCAmelCase = """question""" if """question""" in column_names else column_names[0] UpperCAmelCase = """context""" if """context""" in column_names else column_names[1] UpperCAmelCase = """answers""" if """answers""" in column_names else column_names[2] # Padding side determines if we do (question|context) or (context|question). UpperCAmelCase = tokenizer.padding_side == """right""" if args.max_seq_length > tokenizer.model_max_length: logger.warning( F'''The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the''' F'''model ({tokenizer.model_max_length}). 
Using max_seq_length={tokenizer.model_max_length}.''' ) UpperCAmelCase = min(args.max_seq_length, tokenizer.model_max_length) def lowercase ( a__ : Dict ) -> List[Any]: # Some of the questions have lots of whitespace on the left, which is not useful and will make the # truncation of the context fail (the tokenized question will take a lots of space). So we remove that # left whitespace _UpperCamelCase = [q.lstrip() for q in examples[question_column_name]] # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results # in one example possible giving several features when a context is long, each of those features having a # context that overlaps a bit the context of the previous feature. _UpperCamelCase = tokenizer( examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation='''only_second''' if pad_on_right else '''only_first''' , max_length=_lowercase , stride=args.doc_stride , return_overflowing_tokens=_lowercase , return_offsets_mapping=_lowercase , padding='''max_length''' , ) # Since one example might give us several features if it has a long context, we need a map from a feature to # its corresponding example. This key gives us just that. _UpperCamelCase = tokenized_examples.pop('''overflow_to_sample_mapping''' ) # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the # corresponding example_id and we will store the offset mappings. _UpperCamelCase = [] for i in range(len(tokenized_examples['''input_ids'''] ) ): # Grab the sequence corresponding to that example (to know what is the context and what is the question). _UpperCamelCase = tokenized_examples.sequence_ids(_lowercase ) _UpperCamelCase = 1 if pad_on_right else 0 # One example can give several spans, this is the index of the example containing this span of text. _UpperCamelCase = sample_mapping[i] tokenized_examples["example_id"].append(examples['''id'''][sample_index] ) # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token # position is part of the context or not. _UpperCamelCase = [ (o if sequence_ids[k] == context_index else None) for k, o in enumerate(tokenized_examples['''offset_mapping'''][i] ) ] return tokenized_examples UpperCAmelCase = raw_datasets["""validation"""] # Validation Feature Creation UpperCAmelCase = eval_examples.map( prepare_validation_features, batched=True, num_proc=args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not args.overwrite_cache, desc="""Running tokenizer on validation dataset""", ) UpperCAmelCase = default_data_collator UpperCAmelCase = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""]) UpperCAmelCase = DataLoader( eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size ) def lowercase ( a__ : Union[str, Any] , a__ : Union[str, Any] , a__ : int , a__ : Optional[Any]="eval" ) -> Tuple: # Post-processing: we match the start logits and end logits to answers in the original context. 
_UpperCamelCase = postprocess_qa_predictions( examples=_lowercase , features=_lowercase , predictions=_lowercase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=_lowercase , ) # Format the result to the format the metric expects. if args.version_2_with_negative: _UpperCamelCase = [ {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items() ] else: _UpperCamelCase = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()] _UpperCamelCase = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples] return EvalPrediction(predictions=_lowercase , label_ids=_lowercase ) UpperCAmelCase = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""") # Evaluation! logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path) with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine( f.read() ) as engine, engine.create_execution_context() as context: # setup for TRT inferrence for i in range(len(input_names)): context.set_binding_shape(i, INPUT_SHAPE) assert context.all_binding_shapes_specified def lowercase ( a__ : Dict ) -> Union[str, Any]: return trt.volume(engine.get_binding_shape(_lowercase ) ) * engine.get_binding_dtype(_lowercase ).itemsize # Allocate device memory for inputs and outputs. UpperCAmelCase = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)] # Allocate output buffer UpperCAmelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa) UpperCAmelCase = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa) UpperCAmelCase = cuda.mem_alloc(h_outputa.nbytes) UpperCAmelCase = cuda.mem_alloc(h_outputa.nbytes) # Create a stream in which to copy inputs/outputs and run inference. 
UpperCAmelCase = cuda.Stream() # Evaluation logger.info("""***** Running Evaluation *****""") logger.info(F''' Num examples = {len(eval_dataset)}''') logger.info(F''' Batch size = {args.per_device_eval_batch_size}''') UpperCAmelCase = 0.0 UpperCAmelCase = 0 UpperCAmelCase = timeit.default_timer() UpperCAmelCase = None for step, batch in enumerate(eval_dataloader): UpperCAmelCase = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream) total_time += infer_time niter += 1 UpperCAmelCase = outputs UpperCAmelCase = torch.tensor(start_logits) UpperCAmelCase = torch.tensor(end_logits) # necessary to pad predictions and labels for being gathered UpperCAmelCase = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100) UpperCAmelCase = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100) UpperCAmelCase = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy()) UpperCAmelCase = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100) if all_preds is not None: UpperCAmelCase = nested_truncate(all_preds, len(eval_dataset)) UpperCAmelCase = timeit.default_timer() - start_time logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset)) # Inference time from TRT logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 1_000 / niter)) logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 1_000)) logger.info("""Total Number of Inference = %d""", niter) UpperCAmelCase = post_processing_function(eval_examples, eval_dataset, all_preds) UpperCAmelCase = metric.compute(predictions=prediction.predictions, references=prediction.label_ids) logger.info(F'''Evaluation metrics: {eval_metric}''')
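# The evaluation loop above calls model_infer(...), whose definition sits
# earlier in the full script and is not shown in this excerpt. A hedged sketch
# of its typical shape (names such as h_output1/h_output2 are assumptions; the
# obfuscation above collapsed them into duplicated identifiers): copy host
# inputs to device, launch the TensorRT execution context asynchronously, copy
# the two logit buffers back to the host, and time the round trip.
import time

import numpy as np
import pycuda.driver as cuda


def model_infer(inputs, context, d_inputs, h_output1, h_output2, d_output1, d_output2, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # copy inputs host -> device
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)

    start_time = time.time()
    # run inference on the TensorRT execution context
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output1), int(d_output2)],
        stream_handle=stream.handle,
    )
    # copy predictions device -> host and wait for the stream to finish
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    cuda.memcpy_dtoh_async(h_output2, d_output2, stream)
    stream.synchronize()
    infer_time = time.time() - start_time

    return (h_output1, h_output2), infer_time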
256
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
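# A minimal sketch of how `find_executable_batch_size` is used outside of a
# test (the model/optimizer/dataset names are illustrative, not from the
# original file): the decorated function receives the current batch size as
# its first argument and is retried with a halved batch size whenever it
# raises a CUDA out-of-memory error.
from torch.utils.data import DataLoader

from accelerate.utils.memory import find_executable_batch_size


def train(model, optimizer, train_dataset):
    @find_executable_batch_size(starting_batch_size=128)
    def inner_training_loop(batch_size):
        # re-create anything that depends on the batch size inside the loop
        loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
        for batch in loader:
            optimizer.zero_grad()
            loss = model(**batch).loss
            loss.backward()
            optimizer.step()

    inner_training_loop()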
265
0
import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging _UpperCAmelCase : List[Any] =logging.get_logger(__name__) # pylint: disable=invalid-name class snake_case__( UpperCAmelCase__ ): '''simple docstring''' def __init__( self , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase , ) -> Optional[Any]: super().__init__() self.register_modules( vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , unet=__lowercase , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , ) def lowercase_ ( self , __lowercase = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowerCAmelCase_ : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__lowercase ) def lowercase_ ( self ) -> Dict: self.enable_attention_slicing(__lowercase ) @torch.no_grad() def __call__( self , __lowercase , __lowercase = 5_1_2 , __lowercase = 5_1_2 , __lowercase = 5_0 , __lowercase = 7.5 , __lowercase = None , __lowercase = 1 , __lowercase = 0.0 , __lowercase = None , __lowercase = None , __lowercase = "pil" , __lowercase = True , __lowercase = None , __lowercase = 1 , __lowercase = None , **__lowercase , ) -> List[Any]: if isinstance(__lowercase , __lowercase ): lowerCAmelCase_ : List[str] = 1 elif isinstance(__lowercase , __lowercase ): lowerCAmelCase_ : Dict = len(__lowercase ) else: raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(__lowercase )}""" ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__lowercase , __lowercase ) or callback_steps <= 0) ): raise ValueError( f"""`callback_steps` has to be a positive integer but is {callback_steps} of type""" f""" {type(__lowercase )}.""" ) # get prompt text embeddings lowerCAmelCase_ : List[str] = self.tokenizer( __lowercase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , ) lowerCAmelCase_ : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: lowerCAmelCase_ : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( '''The following part of your input was truncated because CLIP can only handle sequences up to''' f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" ) lowerCAmelCase_ : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: lowerCAmelCase_ : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method lowerCAmelCase_ : Union[str, Any] = text_embeddings.shape lowerCAmelCase_ : List[str] = text_embeddings.repeat(1 , __lowercase , 1 ) lowerCAmelCase_ : List[Any] = text_embeddings.view(bs_embed * 
num_images_per_prompt , __lowercase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. lowerCAmelCase_ : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: lowerCAmelCase_ : List[str] if negative_prompt is None: lowerCAmelCase_ : Any = [""""""] elif type(__lowercase ) is not type(__lowercase ): raise TypeError( f"""`negative_prompt` should be the same type to `prompt`, but got {type(__lowercase )} !=""" f""" {type(__lowercase )}.""" ) elif isinstance(__lowercase , __lowercase ): lowerCAmelCase_ : Optional[int] = [negative_prompt] elif batch_size != len(__lowercase ): raise ValueError( f"""`negative_prompt`: {negative_prompt} has batch size {len(__lowercase )}, but `prompt`:""" f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches""" ''' the batch size of `prompt`.''' ) else: lowerCAmelCase_ : Any = negative_prompt lowerCAmelCase_ : Dict = text_input_ids.shape[-1] lowerCAmelCase_ : List[Any] = self.tokenizer( __lowercase , padding='''max_length''' , max_length=__lowercase , truncation=__lowercase , return_tensors='''pt''' , ) lowerCAmelCase_ : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method lowerCAmelCase_ : int = uncond_embeddings.shape[1] lowerCAmelCase_ : List[Any] = uncond_embeddings.repeat(__lowercase , __lowercase , 1 ) lowerCAmelCase_ : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowercase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes lowerCAmelCase_ : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
lowerCAmelCase_ : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) lowerCAmelCase_ : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 6_4, 6_4) lowerCAmelCase_ : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps lowerCAmelCase_ : Dict = torch.randn( __lowercase , generator=__lowercase , device='''cpu''' , dtype=__lowercase ).to(self.device ) lowerCAmelCase_ : int = torch.randn(__lowercase , generator=__lowercase , device='''cpu''' , dtype=__lowercase ).to( self.device ) else: lowerCAmelCase_ : int = torch.randn( __lowercase , generator=__lowercase , device=self.device , dtype=__lowercase ) lowerCAmelCase_ : int = torch.randn(__lowercase , generator=__lowercase , device=self.device , dtype=__lowercase ) else: if latents_reference.shape != latents_shape: raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" ) lowerCAmelCase_ : Optional[Any] = latents_reference.to(self.device ) lowerCAmelCase_ : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images lowerCAmelCase_ : int = (latents_shape[3] - latents_shape_reference[3]) // 2 lowerCAmelCase_ : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 lowerCAmelCase_ : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx lowerCAmelCase_ : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy lowerCAmelCase_ : Optional[int] = 0 if dx < 0 else dx lowerCAmelCase_ : List[str] = 0 if dy < 0 else dy lowerCAmelCase_ : Union[str, Any] = max(-dx , 0 ) lowerCAmelCase_ : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() lowerCAmelCase_ : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(__lowercase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand lowerCAmelCase_ : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler lowerCAmelCase_ : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] lowerCAmelCase_ : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) lowerCAmelCase_ : Optional[Any] = {} if accepts_eta: lowerCAmelCase_ : List[str] = eta for i, t in enumerate(self.progress_bar(__lowercase ) ): # expand the latents if we are doing classifier free guidance lowerCAmelCase_ : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents lowerCAmelCase_ : str = self.scheduler.scale_model_input(__lowercase , __lowercase ) # predict the noise residual lowerCAmelCase_ : Any = self.unet(__lowercase , __lowercase , encoder_hidden_states=__lowercase ).sample # perform guidance if do_classifier_free_guidance: lowerCAmelCase_ : Any = noise_pred.chunk(2 ) lowerCAmelCase_ : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 lowerCAmelCase_ : Dict = self.scheduler.step(__lowercase , __lowercase , __lowercase , **__lowercase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__lowercase , __lowercase , __lowercase ) lowerCAmelCase_ : Union[str, Any] = 1 / 0.1_82_15 * latents lowerCAmelCase_ : Tuple = self.vae.decode(__lowercase ).sample lowerCAmelCase_ : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 lowerCAmelCase_ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: lowerCAmelCase_ : int = self.feature_extractor(self.numpy_to_pil(__lowercase ) , return_tensors='''pt''' ).to( self.device ) lowerCAmelCase_ : int = self.safety_checker( images=__lowercase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: lowerCAmelCase_ : Any = None if output_type == "pil": lowerCAmelCase_ : int = self.numpy_to_pil(__lowercase ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=__lowercase , nsfw_content_detected=__lowercase )
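# A hedged usage sketch for the pipeline above (a community "seed resize"
# Stable Diffusion variant). All keyword names here are assumptions inferred
# from the obfuscated body, and `pipe` is assumed to be an instance of the
# class above built from pretrained components; this is illustrative only.
import torch


def seed_resize_render(pipe, prompt: str, seed: int = 42):
    # reference latents at the base 512x512 resolution (64x64 in latent space)
    generator = torch.Generator(device="cpu").manual_seed(seed)
    reference = torch.randn((1, pipe.unet.config.in_channels, 64, 64), generator=generator)
    # a wider render anchored to the same latents, so the composition of the
    # base-resolution image is largely preserved (latents_reference: assumed keyword)
    return pipe(prompt, height=512, width=768, latents_reference=reference).images[0]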
262
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_git": ["GIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "GitConfig", "GitVisionConfig"],
    "processing_git": ["GitProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_git"] = [
        "GIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GitForCausalLM",
        "GitModel",
        "GitPreTrainedModel",
        "GitVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig
    from .processing_git import GitProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_git import (
            GIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GitForCausalLM,
            GitModel,
            GitPreTrainedModel,
            GitVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
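# Illustration of what the lazy-module pattern above buys (a sketch written
# for this edit, not part of the original file): importing the package is
# cheap because heavy, torch-dependent submodules are only imported when an
# attribute is first accessed through the `_LazyModule` shim.
import transformers.models.git as git  # loads only the lazy shim

config_cls = git.GitConfig  # first attribute access triggers the real import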
265
0
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
    "feature_extraction_mctct": ["MCTCTFeatureExtractor"],
    "processing_mctct": ["MCTCTProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mctct"] = [
        "MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MCTCTForCTC",
        "MCTCTModel",
        "MCTCTPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
    from .feature_extraction_mctct import MCTCTFeatureExtractor
    from .processing_mctct import MCTCTProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
133
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
265
0
from __future__ import annotations

from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass


@dataclass
class Edge:
    """Weighted directed graph edge; the weight is restricted to 0 or 1."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))

    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int | None:
        """0-1 BFS: weight-0 edges go to the front of the deque and weight-1
        edges to the back, so vertices are settled in nondecreasing distance order."""
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if (
                    isinstance(dest_vertex_distance, int)
                    and new_distance >= dest_vertex_distance
                ):
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")
        return distances[finish_vertex]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
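# A small usage sketch for the 0-1 BFS above (using the names as reconstructed
# here; the example graph is chosen for this edit, not from the original file).
# Because weight-0 edges are pushed to the front of the deque, the algorithm
# runs in O(V + E) instead of Dijkstra's O((V + E) log V).
g = AdjacencyList(3)
g.add_edge(0, 1, 0)
g.add_edge(1, 2, 1)
g.add_edge(0, 2, 1)
assert g.get_shortest_path(0, 1) == 0
assert g.get_shortest_path(0, 2) == 1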
234
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
265
0
"""simple docstring""" from multiprocessing import Lock, Pipe, Process # lock used to ensure that two processes do not access a pipe at the same time A_ : str = Lock() def A ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' global process_lock # we perform n swaps since after n swaps we know we are sorted # we *could* stop early if we are sorted already, but it takes as long to # find out we are sorted as it does to sort the list with this algorithm for i in range(0 , 10 ): if (i + position) % 2 == 0 and r_send is not None: # send your value to your right neighbor process_lock.acquire() r_send[1].send(_lowercase ) process_lock.release() # receive your right neighbor's value process_lock.acquire() SCREAMING_SNAKE_CASE__ = rr_cv[0].recv() process_lock.release() # take the lower value since you are on the left SCREAMING_SNAKE_CASE__ = min(_lowercase , _lowercase ) elif (i + position) % 2 != 0 and l_send is not None: # send your value to your left neighbor process_lock.acquire() l_send[1].send(_lowercase ) process_lock.release() # receive your left neighbor's value process_lock.acquire() SCREAMING_SNAKE_CASE__ = lr_cv[0].recv() process_lock.release() # take the higher value since you are on the right SCREAMING_SNAKE_CASE__ = max(_lowercase , _lowercase ) # after all swaps are performed, send the values back to main result_pipe[1].send(_lowercase ) def A ( snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = [] SCREAMING_SNAKE_CASE__ = [] # initialize the list of pipes where the values will be retrieved for _ in arr: result_pipe.append(Pipe() ) # creates the processes # the first and last process only have one neighbor so they are made outside # of the loop SCREAMING_SNAKE_CASE__ = Pipe() SCREAMING_SNAKE_CASE__ = Pipe() process_array_.append( Process( target=_lowercase , args=(0, arr[0], None, temp_rs, None, temp_rr, result_pipe[0]) , ) ) SCREAMING_SNAKE_CASE__ = temp_rs SCREAMING_SNAKE_CASE__ = temp_rr for i in range(1 , len(_lowercase ) - 1 ): SCREAMING_SNAKE_CASE__ = Pipe() SCREAMING_SNAKE_CASE__ = Pipe() process_array_.append( Process( target=_lowercase , args=(i, arr[i], temp_ls, temp_rs, temp_lr, temp_rr, result_pipe[i]) , ) ) SCREAMING_SNAKE_CASE__ = temp_rs SCREAMING_SNAKE_CASE__ = temp_rr process_array_.append( Process( target=_lowercase , args=( len(_lowercase ) - 1, arr[len(_lowercase ) - 1], temp_ls, None, temp_lr, None, result_pipe[len(_lowercase ) - 1], ) , ) ) # start the processes for p in process_array_: p.start() # wait for the processes to end and write their values to the list for p in range(0 , len(_lowercase ) ): SCREAMING_SNAKE_CASE__ = result_pipe[p][0].recv() process_array_[p].join() return arr def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = list(range(10 , 0 , -1 ) ) print("""Initial List""" ) print(*_lowercase ) SCREAMING_SNAKE_CASE__ = odd_even_transposition(_lowercase ) print("""Sorted List\n""" ) print(*_lowercase ) if __name__ == "__main__": main()
165
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a : List[Any] = """__DUMMY_TRANSFORMERS_USER__""" a : Tuple = """Dummy User""" a : Optional[Any] = """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""" a : Optional[Any] = """https://hub-ci.huggingface.co""" a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" a : str = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Optional[int]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: HfFolder.save_token(_lowercase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> str: return HfApi(endpoint=_lowercase ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : str = HfFolder.get_token() HfFolder.save_token(_lowercase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: def _cleanup_repo(_lowercase ): hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: @contextmanager def _temporary_repo(_lowercase ): try: yield repo_id finally: cleanup_repo(_lowercase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and 
token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_zipped_img_data_
265
0
import operator


def strand_sort(arr: list, reverse: bool = False, solution: list | None = None) -> list:
    """
    Strand sort: repeatedly pull an increasing "strand" (sublist) out of the
    input and merge it into the solution list.
    >>> strand_sort([4, 3, 5, 1, 2])
    [1, 2, 3, 4, 5]
    >>> strand_sort([4, 3, 5, 1, 2], reverse=True)
    [5, 4, 3, 2, 1]
    """
    _operator = operator.lt if reverse else operator.gt
    solution = solution or []

    if not arr:
        return solution

    sublist = [arr.pop(0)]
    for i, item in enumerate(arr):
        if _operator(item, sublist[-1]):
            sublist.append(item)
            arr.pop(i)

    # merging sublist into solution list
    if not solution:
        solution.extend(sublist)
    else:
        while sublist:
            item = sublist.pop(0)
            for i, xx in enumerate(solution):
                if not _operator(item, xx):
                    solution.insert(i, item)
                    break
            else:
                solution.append(item)

    strand_sort(arr, reverse, solution)
    return solution


if __name__ == "__main__":
    assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
    assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
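# Extra edge-case checks for the reconstructed strand_sort (cases chosen for
# this edit): duplicates never join a strand because the comparison is strict,
# so they are placed during the merge step instead.
assert strand_sort([1, 2, 2, 3]) == [1, 2, 2, 3]
assert strand_sort([7, 7, 1]) == [1, 7, 7]
assert strand_sort([]) == []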
30
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , **A ) -> List[str]: super().__init__(**A ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , A , **A ) -> Optional[Any]: return super().__call__(A , **A ) def _lowercase( self , **A ) -> Optional[Any]: UpperCAmelCase : List[Any] = {} if "candidate_labels" in kwargs: UpperCAmelCase : Dict = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]: UpperCAmelCase : int = load_image(A ) UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) UpperCAmelCase : List[str] = candidate_labels UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels] UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A ) UpperCAmelCase : Union[str, Any] = [text_inputs] return inputs def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" ) UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , A ): UpperCAmelCase : Optional[Any] = text_inputs[0] else: # Batching case. UpperCAmelCase : Any = text_inputs[0][0] UpperCAmelCase : Dict = self.model(**A , **A ) UpperCAmelCase : List[Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def _lowercase( self , A ) -> Union[str, Any]: UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" ) UpperCAmelCase : int = model_outputs["""logits"""][0] if self.framework == "pt": UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) UpperCAmelCase : Any = probs.tolist() if not isinstance(A , A ): UpperCAmelCase : Any = [scores] elif self.framework == "tf": UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 ) UpperCAmelCase : Union[str, Any] = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase : Any = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -x[0] ) ] return result
265
0
"""simple docstring""" from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar __SCREAMING_SNAKE_CASE : List[str] = TypeVar('T') class __A (Generic[T]): '''simple docstring''' __lowercase: Tuple = 42 # Cache store of keys __lowercase: int = 42 # References of the keys in cache __lowercase: Dict = 10 # Maximum capacity of cache def __init__( self : Optional[Any] , UpperCAmelCase_ : Tuple ) ->None: """simple docstring""" snake_case_ = deque() snake_case_ = set() if not n: snake_case_ = sys.maxsize elif n < 0: raise ValueError("""n should be an integer greater than 0.""" ) else: snake_case_ = n def lowerCAmelCase ( self : Union[str, Any] , UpperCAmelCase_ : Any ) ->None: """simple docstring""" if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: snake_case_ = self.dq_store.pop() self.key_reference.remove(UpperCAmelCase_ ) else: self.dq_store.remove(UpperCAmelCase_ ) self.dq_store.appendleft(UpperCAmelCase_ ) self.key_reference.add(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] ) ->None: """simple docstring""" for k in self.dq_store: print(UpperCAmelCase_ ) def __repr__( self : int ) ->str: """simple docstring""" return F"""LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}""" if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE : LRUCache[str | int] = LRUCache(4) lru_cache.refer('A') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('A') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
347
"""Property-style tests: HashMap must behave exactly like the built-in dict."""
from operator import delitem, getitem, setitem

import pytest

from data_structures.hashing.hash_map import HashMap


def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    # Return a (result, exception) pair so failures can be compared too.
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e


_add_items = (
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
)

_overwrite_items = [
    _set("key_a", "val_a"),
    _set("key_a", "val_b"),
]

_delete_items = [
    _set("key_a", "val_a"),
    _set("key_b", "val_b"),
    _del("key_a"),
    _del("key_b"),
    _set("key_a", "val_a"),
    _del("key_a"),
]

_access_absent_items = [
    _get("key_a"),
    _del("key_a"),
    _set("key_a", "val_a"),
    _del("key_a"),
    _del("key_a"),
    _get("key_a"),
]

_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]

_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("key_a", "val_b"),
]


@pytest.mark.parametrize(
    "operations",
    (
        pytest.param(_add_items, id="add items"),
        pytest.param(_overwrite_items, id="overwrite items"),
        pytest.param(_delete_items, id="delete items"),
        pytest.param(_access_absent_items, id="access absent items"),
        pytest.param(_add_with_resize_up, id="add with resize up"),
        pytest.param(_add_with_resize_down, id="add with resize down"),
    ),
)
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())


def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
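A small illustration of the _run_operation helper above: it packages results and exceptions as a pair so dict and HashMap failures can be compared symmetrically (the keys here are arbitrary):

d = {}
res, err = _run_operation(d, *_set("k", "v"))
assert (res, err) == (None, None)  # setitem returns None on success
res, err = _run_operation(d, *_get("missing"))
assert res is None and isinstance(err, KeyError)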
265
0
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    """Interleave the characters of two strings, appending any leftover tail."""
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
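A couple of extra invocations of the function above, with expected outputs worked out by hand from the interleaving rule:

assert alternative_string_arrange("AB", "XYZ") == "AXBYZ"
assert alternative_string_arrange("ABCDE", "XY") == "AXBYCDE"
assert alternative_string_arrange("", "XYZ") == "XYZ"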
232
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : str = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): lowercase = 'facebook/mbart-large-en-ro' lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def _lowercase( cls ) -> Tuple: UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCAmelCase : int = 1 return cls def _lowercase( self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def _lowercase( 
self ) -> List[str]: self.assertIn(A , self.tokenizer.all_special_ids ) UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A ) UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , A ) UpperCAmelCase : int = 10 UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def _lowercase( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = tempfile.mkdtemp() UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" ) UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" ) UpperCAmelCase : Dict = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" ) UpperCAmelCase : Dict = targets["""input_ids"""] UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
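A hedged sketch of the translation-tokenizer setup these tests exercise; the checkpoint name, language codes, and sample sentence come from the test constants above, the rest is illustrative:

from transformers import MBartTokenizer

tok = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tok(" UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
# Per the suffix_tokens convention asserted above, input_ids end with [eos, en_XX code].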
265
0
from typing import TYPE_CHECKING

from ..utils import _LazyModule

_import_structure = {
    "config": [
        "EXTERNAL_DATA_FORMAT_SIZE_LIMIT",
        "OnnxConfig",
        "OnnxConfigWithPast",
        "OnnxSeq2SeqConfigWithPast",
        "PatchingSpec",
    ],
    "convert": ["export", "validate_model_outputs"],
    "features": ["FeaturesManager"],
    "utils": ["ParameterFormat", "compute_serialized_parameters_size"],
}

if TYPE_CHECKING:
    from .config import (
        EXTERNAL_DATA_FORMAT_SIZE_LIMIT,
        OnnxConfig,
        OnnxConfigWithPast,
        OnnxSeq2SeqConfigWithPast,
        PatchingSpec,
    )
    from .convert import export, validate_model_outputs
    from .features import FeaturesManager
    from .utils import ParameterFormat, compute_serialized_parameters_size
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
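A qualitative sketch of what the _LazyModule indirection above buys: the package import itself stays cheap, and each submodule is only imported the first time one of its attributes is touched (the timing claim is qualitative, not measured here):

import transformers.onnx                    # cheap: submodules are not imported yet
config_cls = transformers.onnx.OnnxConfig  # first attribute access imports .config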
227
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
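A hedged sketch of the push/reload round-trip the tests above exercise; "my-user/my-image-processor" is a placeholder repo id and a valid Hub token is assumed:

from transformers import ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
processor.push_to_hub("my-user/my-image-processor")  # placeholder repo id
reloaded = ViTImageProcessor.from_pretrained("my-user/my-image-processor")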
265
0
import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel from diffusers.utils.testing_utils import ( enable_full_determinism, load_numpy, nightly, require_torch_gpu, slow, torch_device, ) from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( snake_case_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase = LDMTextToImagePipeline lowerCAmelCase = TEXT_TO_IMAGE_PARAMS - { '''negative_prompt''', '''negative_prompt_embeds''', '''cross_attention_kwargs''', '''prompt_embeds''', } lowerCAmelCase = PipelineTesterMixin.required_optional_params - { '''num_images_per_prompt''', '''callback''', '''callback_steps''', } lowerCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS lowerCAmelCase = False def _UpperCamelCase ( self ) -> List[Any]: torch.manual_seed(0 ) snake_case_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) snake_case_ = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , ) torch.manual_seed(0 ) snake_case_ = AutoencoderKL( block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=('DownEncoderBlock2D', 'DownEncoderBlock2D') , up_block_types=('UpDecoderBlock2D', 'UpDecoderBlock2D') , latent_channels=4 , ) torch.manual_seed(0 ) snake_case_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) snake_case_ = CLIPTextModel(a ) snake_case_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) snake_case_ = { """unet""": unet, """scheduler""": scheduler, """vqvae""": vae, """bert""": text_encoder, """tokenizer""": tokenizer, } return components def _UpperCamelCase ( self , a , a=0 ) -> Any: if str(a ).startswith('mps' ): snake_case_ = torch.manual_seed(a ) else: snake_case_ = torch.Generator(device=a ).manual_seed(a ) snake_case_ = { """prompt""": """A painting of a squirrel eating a burger""", """generator""": generator, """num_inference_steps""": 2, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _UpperCamelCase ( self ) -> Tuple: snake_case_ = """cpu""" # ensure determinism for the device-dependent torch.Generator snake_case_ = self.get_dummy_components() snake_case_ = LDMTextToImagePipeline(**a ) pipe.to(a ) pipe.set_progress_bar_config(disable=a ) snake_case_ = self.get_dummy_inputs(a ) snake_case_ = pipe(**a ).images snake_case_ = image[0, -3:, -3:, -1] assert image.shape == (1, 16, 16, 3) snake_case_ = np.array([0.6_101, 0.6_156, 0.5_622, 0.4_895, 0.6_661, 0.3_804, 0.5_748, 0.6_136, 0.5_014] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ) -> Optional[Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self , a , a=torch.floataa , a=0 ) -> Any: snake_case_ = torch.manual_seed(a ) snake_case_ = np.random.RandomState(a 
).standard_normal((1, 4, 32, 32) ) snake_case_ = torch.from_numpy(a ).to(device=a , dtype=a ) snake_case_ = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _UpperCamelCase ( self ) -> Any: snake_case_ = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(a ) pipe.set_progress_bar_config(disable=a ) snake_case_ = self.get_inputs(a ) snake_case_ = pipe(**a ).images snake_case_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 2_56, 2_56, 3) snake_case_ = np.array([0.51_825, 0.52_850, 0.52_543, 0.54_258, 0.52_304, 0.52_569, 0.54_363, 0.55_276, 0.56_878] ) snake_case_ = np.abs(expected_slice - image_slice ).max() assert max_diff < 1E-3 @nightly @require_torch_gpu class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' def _UpperCamelCase ( self ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self , a , a=torch.floataa , a=0 ) -> Any: snake_case_ = torch.manual_seed(a ) snake_case_ = np.random.RandomState(a ).standard_normal((1, 4, 32, 32) ) snake_case_ = torch.from_numpy(a ).to(device=a , dtype=a ) snake_case_ = { """prompt""": """A painting of a squirrel eating a burger""", """latents""": latents, """generator""": generator, """num_inference_steps""": 50, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _UpperCamelCase ( self ) -> Any: snake_case_ = LDMTextToImagePipeline.from_pretrained('CompVis/ldm-text2im-large-256' ).to(a ) pipe.set_progress_bar_config(disable=a ) snake_case_ = self.get_inputs(a ) snake_case_ = pipe(**a ).images[0] snake_case_ = load_numpy( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy' ) snake_case_ = np.abs(expected_image - image ).max() assert max_diff < 1E-3
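An illustrative end-to-end call mirroring the slow test above; the checkpoint name comes from the test itself, and the reduced step count is only for brevity:

import torch
from diffusers import LDMTextToImagePipeline

pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256")
pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
image = pipe(
    "A painting of a squirrel eating a burger",
    num_inference_steps=3,  # the nightly test uses 50
    guidance_scale=6.0,
).images[0]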
178
'''simple docstring''' # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from accelerate import PartialState from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce def __lowerCamelCase ( _lowercase ) -> Tuple: return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device ) def __lowerCamelCase ( _lowercase ) -> List[Any]: UpperCAmelCase : Any = create_tensor(_lowercase ) UpperCAmelCase : Union[str, Any] = gather(_lowercase ) assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : Any = [state.process_index] UpperCAmelCase : Union[str, Any] = gather_object(_lowercase ) assert len(_lowercase ) == state.num_processes, F'''{gathered_obj}, {len(_lowercase )} != {state.num_processes}''' assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}''' def __lowerCamelCase ( _lowercase ) -> List[Any]: UpperCAmelCase : Optional[int] = create_tensor(_lowercase ) UpperCAmelCase : List[str] = broadcast(_lowercase ) assert broadcasted_tensor.shape == torch.Size([state.num_processes] ) assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) ) def __lowerCamelCase ( _lowercase ) -> Tuple: # We need to pad the tensor with one more element if we are the main process # to ensure that we can pad if state.is_main_process: UpperCAmelCase : Optional[Any] = torch.arange(state.num_processes + 1 ).to(state.device ) else: UpperCAmelCase : Tuple = torch.arange(state.num_processes ).to(state.device ) UpperCAmelCase : Optional[Any] = pad_across_processes(_lowercase ) assert padded_tensor.shape == torch.Size([state.num_processes + 1] ) if not state.is_main_process: assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0] def __lowerCamelCase ( _lowercase ) -> Dict: # For now runs on only two processes if state.num_processes != 2: return UpperCAmelCase : Optional[Any] = create_tensor(_lowercase ) UpperCAmelCase : Optional[Any] = reduce(_lowercase , """sum""" ) UpperCAmelCase : Optional[Any] = torch.tensor([4.0, 6] ).to(state.device ) assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}''' def __lowerCamelCase ( _lowercase ) -> Optional[Any]: # For now runs on only two processes if state.num_processes != 2: return UpperCAmelCase : Tuple = create_tensor(_lowercase ) UpperCAmelCase : Optional[int] = reduce(_lowercase , """mean""" ) UpperCAmelCase : str = torch.tensor([2.0, 3] ).to(state.device ) assert torch.allclose(_lowercase , _lowercase ), F'''{reduced_tensor} != {truth_tensor}''' def __lowerCamelCase ( _lowercase ) -> Optional[int]: # For xla_spawn (TPUs) main() def __lowerCamelCase ( ) -> int: UpperCAmelCase : List[Any] = PartialState() state.print(F'''State: {state}''' ) state.print("""testing gather""" ) test_gather(_lowercase ) 
state.print("""testing gather_object""" ) test_gather_object(_lowercase ) state.print("""testing broadcast""" ) test_broadcast(_lowercase ) state.print("""testing pad_across_processes""" ) test_pad_across_processes(_lowercase ) state.print("""testing reduce_sum""" ) test_reduce_sum(_lowercase ) state.print("""testing reduce_mean""" ) test_reduce_mean(_lowercase ) if __name__ == "__main__": main()
265
0
from __future__ import annotations

import pandas as pd


def calculate_waitingtime(
    arrival_time: list[int], burst_time: list[int], no_of_processes: int
) -> list[int]:
    """Calculate waiting times under shortest-remaining-time-first scheduling."""
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time


def calculate_turnaroundtime(
    burst_time: list[int], no_of_processes: int, waiting_time: list[int]
) -> list[int]:
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(
    waiting_time: list[int], turn_around_time: list[int], no_of_processes: int
) -> None:
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)


if __name__ == "__main__":
    print("Enter how many process you want to analyze")
    no_of_processes = int(input())
    burst_time = [0] * no_of_processes
    arrival_time = [0] * no_of_processes
    processes = list(range(1, no_of_processes + 1))

    for i in range(no_of_processes):
        print("Enter the arrival time and burst time for process:--" + str(i + 1))
        arrival_time[i], burst_time[i] = map(int, input().split())

    waiting_time = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
    bt = burst_time
    n = no_of_processes
    wt = waiting_time
    turn_around_time = calculate_turnaroundtime(bt, n, wt)

    calculate_average_times(waiting_time, turn_around_time, no_of_processes)

    fcfs = pd.DataFrame(
        list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
        columns=[
            "Process",
            "BurstTime",
            "ArrivalTime",
            "WaitingTime",
            "TurnAroundTime",
        ],
    )

    # Printing the dataFrame
    pd.set_option("display.max_rows", fcfs.shape[0] + 1)
    print(fcfs)
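A non-interactive sketch of the scheduler above (values hand-checked: with arrivals [0, 1] and bursts [3, 1], P2 preempts P1 at t=1, finishing at t=2, and P1 finishes at t=4):

waiting = calculate_waitingtime([0, 1], [3, 1], 2)
assert waiting == [1, 0]
turnaround = calculate_turnaroundtime([3, 1], 2, waiting)
assert turnaround == [4, 1]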
24
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
265
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase = logging.get_logger(__name__) UpperCAmelCase = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class UpperCAmelCase_ ( _lowercase): snake_case__ = '''megatron-bert''' def __init__( self : Dict , __UpperCamelCase : List[Any]=2_9056 , __UpperCamelCase : List[Any]=1024 , __UpperCamelCase : Tuple=24 , __UpperCamelCase : Optional[Any]=16 , __UpperCamelCase : Union[str, Any]=4096 , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Optional[Any]=512 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : List[str]=1E-12 , __UpperCamelCase : Dict=0 , __UpperCamelCase : Union[str, Any]="absolute" , __UpperCamelCase : Union[str, Any]=True , **__UpperCamelCase : Any , ) -> Optional[int]: super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase ) _UpperCamelCase = vocab_size _UpperCamelCase = hidden_size _UpperCamelCase = num_hidden_layers _UpperCamelCase = num_attention_heads _UpperCamelCase = hidden_act _UpperCamelCase = intermediate_size _UpperCamelCase = hidden_dropout_prob _UpperCamelCase = attention_probs_dropout_prob _UpperCamelCase = max_position_embeddings _UpperCamelCase = type_vocab_size _UpperCamelCase = initializer_range _UpperCamelCase = layer_norm_eps _UpperCamelCase = position_embedding_type _UpperCamelCase = use_cache
256
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any: UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : List[Any] = projection_dim UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Any = dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[Any] = scope UpperCAmelCase : Union[str, Any] = bos_token_id def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Tuple = input_mask.numpy() UpperCAmelCase , UpperCAmelCase : int = input_mask.shape UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A ): UpperCAmelCase : Tuple = 1 UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : int = self.get_config() return config, input_ids, tf.convert_to_tensor(A ) def _lowercase( self ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : int = TFBlipTextModel(config=A ) UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A ) UpperCAmelCase : int = model(A , training=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Dict = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = (TFBlipTextModel,) if is_tf_available() else () 
lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> int: UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def _lowercase( self ) -> Union[str, Any]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Dict: pass @slow def _lowercase( self ) -> Dict: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A ) self.assertIsNotNone(A ) def _lowercase( self , A=True ) -> str: super().test_pt_tf_model_equivalence(allow_missing_keys=A )
265
0
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def lowerCAmelCase ( lowerCAmelCase_ )-> List[str]: lowerCAmelCase_ : List[str] = checkpoints.load_tax_checkpoint(_lowercase ) lowerCAmelCase_ : Union[str, Any] = flatten_dict(_lowercase ) return flax_params def lowerCAmelCase ( lowerCAmelCase_ )-> str: lowerCAmelCase_ : List[Any] = {} lowerCAmelCase_ : Optional[Any] = { """token_embedder""": """embeddings""", """encoder_norm""": """layernorm""", """kernel""": """weight""", """.out""": """.output""", """scale""": """weight""", """embedders_0.pos_embedding""": """row_embedder.weight""", """embedders_1.pos_embedding""": """column_embedder.weight""", } lowerCAmelCase_ : Tuple = { """query""": """attention.query""", """key""": """attention.key""", """value""": """attention.value""", """output.dense""": """output""", """encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""", """pre_self_attention_layer_norm""": """self_attention.layer_norm""", """pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""", """mlp.""": """mlp.DenseReluDense.""", """pre_mlp_layer_norm""": """mlp.layer_norm""", """self_attention.o""": """self_attention.attention.o""", """decoder.embeddings.embedding""": """decoder.embed_tokens.weight""", """decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""", """decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.logits_dense.weight""": """decoder.lm_head.weight""", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key lowerCAmelCase_ : str = """.""".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): lowerCAmelCase_ : Tuple = new_key.replace(_lowercase , _lowercase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): lowerCAmelCase_ : Dict = new_key.replace(_lowercase , _lowercase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number lowerCAmelCase_ : Dict = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _lowercase ) lowerCAmelCase_ : Tuple = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number lowerCAmelCase_ : List[str] = re.sub(r'''layers_(\d+)''' , r'''layer.\1''' , _lowercase ) lowerCAmelCase_ : Optional[int] = flax_dict[key] lowerCAmelCase_ : Any = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): lowerCAmelCase_ : Dict = torch.from_numpy(converted_dict[key].T ) else: lowerCAmelCase_ : Tuple = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False )-> Any: lowerCAmelCase_ : Optional[Any] = get_flax_param(_lowercase ) if not use_large: lowerCAmelCase_ : Dict = PixaStructVisionConfig() lowerCAmelCase_ : Any = PixaStructTextConfig() else: lowerCAmelCase_ : Union[str, Any] = PixaStructVisionConfig( hidden_size=1_536 , d_ff=3_968 , num_attention_heads=24 , num_hidden_layers=18 ) lowerCAmelCase_ : str = PixaStructTextConfig(hidden_size=1_536 , d_ff=3_968 , num_heads=24 , 
num_layers=18 ) lowerCAmelCase_ : int = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=_lowercase ) lowerCAmelCase_ : Union[str, Any] = PixaStructForConditionalGeneration(_lowercase ) lowerCAmelCase_ : Union[str, Any] = rename_and_convert_flax_params(_lowercase ) model.load_state_dict(_lowercase ) lowerCAmelCase_ : Any = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) lowerCAmelCase_ : str = PixaStructImageProcessor() lowerCAmelCase_ : Any = PixaStructProcessor(image_processor=_lowercase , tokenizer=_lowercase ) if use_large: lowerCAmelCase_ : List[str] = 4_096 lowerCAmelCase_ : Tuple = True # mkdir if needed os.makedirs(_lowercase , exist_ok=_lowercase ) model.save_pretrained(_lowercase ) processor.save_pretrained(_lowercase ) print('''Model saved in {}'''.format(_lowercase ) ) if __name__ == "__main__": _UpperCAmelCase : Tuple =argparse.ArgumentParser() parser.add_argument("""--t5x_checkpoint_path""", default=None, type=str, help="""Path to the original T5x checkpoint.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--use_large""", action="""store_true""", help="""Use large model.""") parser.add_argument("""--is_vqa""", action="""store_true""", help="""Use large model.""") _UpperCAmelCase : Tuple =parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
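An illustrative invocation of the conversion script above; the script filename and paths are placeholders, while the flags come straight from the argparse definition:

# Illustrative invocation (filename and paths are placeholders):
#   python convert_pix2struct_checkpoint.py \
#       --t5x_checkpoint_path /path/to/t5x_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --use_large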
262
'''simple docstring''' import contextlib import importlib import io import unittest import transformers # Try to import everything from transformers to ensure every object can be loaded. from transformers import * # noqa F406 from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available if is_torch_available(): from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification if is_tf_available(): from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification if is_flax_available(): from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification a : str = DUMMY_UNKNOWN_IDENTIFIER # An actual model hosted on huggingface.co a : int = """main""" # Default branch name a : Any = """f2c752cfc5c0ab6f4bdec59acea69eefbee381c2""" # One particular commit (not the top of `main`) a : str = """aaaaaaa""" # This commit does not exist, so we should 404. a : int = """d9e9f15bc825e4b2c9249e9578f884bbcb5e3684""" # Sha-1 of config.json on the top of `main`, for checking purposes a : Any = """4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3""" @contextlib.contextmanager def __lowerCamelCase ( ) -> List[str]: print("""Welcome!""" ) yield print("""Bye!""" ) @contextlib.contextmanager def __lowerCamelCase ( ) -> Optional[int]: print("""Bonjour!""" ) yield print("""Au revoir!""" ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> List[Any]: # If the spec is missing, importlib would not be able to import the module dynamically. assert transformers.__spec__ is not None assert importlib.util.find_spec("""transformers""" ) is not None class UpperCamelCase_ ( unittest.TestCase ): @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Tuple: with ContextManagers([] ): print("""Transformers are awesome!""" ) # The print statement adds a new line at the end of the output self.assertEqual(mock_stdout.getvalue() , """Transformers are awesome!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Dict: with ContextManagers([context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Welcome!\nTransformers are awesome!\nBye!\n""" ) @unittest.mock.patch("""sys.stdout""" , new_callable=io.StringIO ) def _lowercase( self , A ) -> Union[str, Any]: with ContextManagers([context_fr(), context_en()] ): print("""Transformers are awesome!""" ) # The output should be wrapped with an English and French welcome and goodbye self.assertEqual(mock_stdout.getvalue() , """Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n""" ) @require_torch def _lowercase( self ) -> Optional[int]: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , ["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_tf def _lowercase( self ) -> int: self.assertEqual(find_labels(A ) , ["""labels"""] ) self.assertEqual(find_labels(A ) , ["""labels""", """next_sentence_label"""] ) self.assertEqual(find_labels(A ) , 
["""start_positions""", """end_positions"""] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , ["""labels"""] ) @require_flax def _lowercase( self ) -> Any: # Flax models don't have labels self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) self.assertEqual(find_labels(A ) , [] ) class UpperCamelCase_ ( __magic_name__ ): pass self.assertEqual(find_labels(A ) , [] )
265
0
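The `find_labels` assertions in the test above rely on signature inspection: the helper looks at a model class's forward signature and keeps the label-like parameters. A minimal PyTorch-only sketch of that idea (the real utility also handles TF and Flax classes; the function name here is mine):

import inspect

def find_labels_pt(model_class) -> list:
    signature = inspect.signature(model_class.forward)
    if "QuestionAnswering" in model_class.__name__:
        # QA heads use span positions instead of a single `labels` argument
        return [p for p in signature.parameters
                if "label" in p or p in ("start_positions", "end_positions")]
    return [p for p in signature.parameters if "label" in p]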
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class __lowerCAmelCase : snake_case_ : Tuple = BlenderbotSmallConfig snake_case_ : Any = {} snake_case_ : List[str] = "gelu" def __init__( self : Tuple , snake_case__ : Tuple , snake_case__ : int=13 , snake_case__ : int=7 , snake_case__ : Optional[int]=True , snake_case__ : int=False , snake_case__ : str=99 , snake_case__ : List[str]=32 , snake_case__ : List[str]=2 , snake_case__ : List[Any]=4 , snake_case__ : Any=37 , snake_case__ : List[str]=0.1 , snake_case__ : Any=0.1 , snake_case__ : Optional[int]=20 , snake_case__ : Optional[Any]=2 , snake_case__ : List[Any]=1 , snake_case__ : Union[str, Any]=0 , ): """simple docstring""" _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = seq_length _UpperCAmelCase = is_training _UpperCAmelCase = use_labels _UpperCAmelCase = vocab_size _UpperCAmelCase = hidden_size _UpperCAmelCase = num_hidden_layers _UpperCAmelCase = num_attention_heads _UpperCAmelCase = intermediate_size _UpperCAmelCase = hidden_dropout_prob _UpperCAmelCase = attention_probs_dropout_prob _UpperCAmelCase = max_position_embeddings _UpperCAmelCase = eos_token_id _UpperCAmelCase = pad_token_id _UpperCAmelCase = bos_token_id def UpperCamelCase ( self : List[str] ): """simple docstring""" _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) _UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) _UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 ) _UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) _UpperCAmelCase = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) _UpperCAmelCase = prepare_blenderbot_small_inputs_dict(snake_case__ , snake_case__ , snake_case__ ) return config, inputs_dict def UpperCamelCase ( self : Optional[int] , snake_case__ : str , snake_case__ : int ): """simple docstring""" _UpperCAmelCase = TFBlenderbotSmallModel(config=snake_case__ ).get_decoder() _UpperCAmelCase = inputs_dict["""input_ids"""] _UpperCAmelCase = input_ids[:1, :] _UpperCAmelCase = inputs_dict["""attention_mask"""][:1, :] _UpperCAmelCase = inputs_dict["""head_mask"""] _UpperCAmelCase = 1 # first forward pass _UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , head_mask=snake_case__ , use_cache=snake_case__ ) _UpperCAmelCase = outputs.to_tuple() # create hypothetical next 
token and extent to next_input_ids _UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size ) _UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and _UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 ) _UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) _UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ )[0] _UpperCAmelCase = model(snake_case__ , attention_mask=snake_case__ , past_key_values=snake_case__ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice _UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) _UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx] _UpperCAmelCase = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(snake_case__ , snake_case__ , rtol=1e-3 ) def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_=None , ): '''simple docstring''' if attention_mask is None: _UpperCAmelCase = tf.cast(tf.math.not_equal(_lowercase , config.pad_token_id ) , tf.inta ) if decoder_attention_mask is None: _UpperCAmelCase = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ), ] , axis=-1 , ) if head_mask is None: _UpperCAmelCase = tf.ones((config.encoder_layers, config.encoder_attention_heads) ) if decoder_head_mask is None: _UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) if cross_attn_head_mask is None: _UpperCAmelCase = tf.ones((config.decoder_layers, config.decoder_attention_heads) ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class __lowerCAmelCase ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ): snake_case_ : List[str] = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) snake_case_ : int = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () snake_case_ : Optional[Any] = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) snake_case_ : Optional[int] = True snake_case_ : List[Any] = False snake_case_ : Any = False def UpperCamelCase ( self : Optional[int] ): """simple docstring""" _UpperCAmelCase = TFBlenderbotSmallModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case__ ) def UpperCamelCase ( self : Dict ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase ( self : str ): """simple docstring""" _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*snake_case__ ) @require_tokenizers @require_tf class __lowerCAmelCase ( unittest.TestCase ): snake_case_ : Optional[Any] = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. 
I end up sweating and blushing and feel like " " i\'m going to throw up.\nand why is that?" ] snake_case_ : Optional[Any] = "facebook/blenderbot_small-90M" @cached_property def UpperCamelCase ( self : Optional[int] ): """simple docstring""" return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def UpperCamelCase ( self : List[str] ): """simple docstring""" _UpperCAmelCase = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def UpperCamelCase ( self : int ): """simple docstring""" _UpperCAmelCase = self.tokenizer(self.src_text , return_tensors="tf" ) _UpperCAmelCase = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=snake_case__ , ) _UpperCAmelCase = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=snake_case__ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
133
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Project Euler 115: find the least row length n for which the
    fill-count function F(min_block_length, n) first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
265
0
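The dynamic programme in the block-filling solution above can be cross-checked for small inputs with a direct recursion over the same fill function. This is an illustrative sketch with my own names; it is far too slow for the real n, but it confirms the known value F(3, 7) = 17 from the companion problem's statement:

from functools import lru_cache


def fill_count_naive(min_block_length: int, n: int) -> int:
    """Direct recursive count of the fill function, for small n only."""

    @lru_cache(maxsize=None)
    def ways(length: int) -> int:
        if length == 0:
            return 1
        total = ways(length - 1)  # the first unit stays empty
        for block in range(min_block_length, length + 1):  # a block starts at position 0
            if block == length:
                total += 1  # the block fills the row exactly
            else:
                total += ways(length - block - 1)  # block plus one empty separator
        return total

    return ways(n)


assert fill_count_naive(3, 7) == 17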
def solution(n: int = 4_000_000) -> int:
    """Project Euler 2: sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total


if __name__ == "__main__":
    print(f"{solution() = }")
234
from __future__ import annotations

import math


class SegmentTree:
    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign val to the range [a, b], deferring child updates lazily."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over the range [a, b], applying pending lazy updates."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
265
0
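As a side note on the Fibonacci solution above: the even-valued Fibonacci numbers are exactly every third term and obey E(k) = 4·E(k-1) + E(k-2), so the even terms can be generated directly without touching the odd ones. A short sketch (names are mine):

def even_fib_sum(limit: int = 4_000_000) -> int:
    # 2 and 8 are the first two even Fibonacci numbers
    a, b = 2, 8
    total = 0
    while a <= limit:
        total += a
        a, b = b, 4 * b + a  # E(k) = 4 * E(k - 1) + E(k - 2)
    return total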
"""simple docstring""" def A ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = 0 for i in range(1 , 10_01 ): total += i**i return str(_lowercase )[-10:] if __name__ == "__main__": print(solution())
165
from PIL import Image


def change_brightness(img: Image.Image, level: float) -> Image.Image:
    """Shift the brightness of a PIL Image by `level` (pointwise c -> c + level)."""

    def brightness(c: int) -> float:
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        brigt_img = change_brightness(img, 100)
        brigt_img.save("image_data/lena_brightness.png", format="png")
265
0
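Since only the last ten digits of the power series above matter, the same sum can be taken modulo 10**10 using Python's three-argument pow, which keeps every intermediate value small. A hedged alternative sketch, not the original implementation:

def solution_mod() -> str:
    mod = 10**10
    # pow(i, i, mod) reduces at every multiplication step
    return str(sum(pow(i, i, mod) for i in range(1, 1001)) % mod).zfill(10)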
from typing import List, Optional

from tokenizers import ByteLevelBPETokenizer

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/blenderbot_small-90M": 512,
}


class BlenderbotSmallTokenizerFast(PreTrainedTokenizerFast):
    """Fast byte-level BPE tokenizer for BlenderbotSmall."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BlenderbotSmallTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            ByteLevelBPETokenizer(
                vocab=vocab_file,
                merges=merges_file,
                add_prefix_space=add_prefix_space,
                trim_offsets=trim_offsets,
            ),
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            **kwargs,
        )
        self.add_prefix_space = add_prefix_space

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.bos_token_id] + token_ids_0 + [self.eos_token_id]
        if token_ids_1 is None:
            return output
        return output + [self.eos_token_id] + token_ids_1 + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Creates a mask of zeros, as BlenderbotSmall does not use token type ids."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
30
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
265
0
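The parameterized scaling test in the GPT-NeoX suite above toggles `rope_scaling` between linear and dynamic modes. A sketch of the corresponding user-facing configuration; the small sizes are arbitrary and exact keyword support may vary by transformers version:

from transformers import GPTNeoXConfig, GPTNeoXModel

config = GPTNeoXConfig(
    vocab_size=1000,
    hidden_size=64,
    intermediate_size=128,
    num_hidden_layers=2,
    num_attention_heads=8,
    rope_scaling={"type": "dynamic", "factor": 10.0},  # assumption: dict form as in the test
)
model = GPTNeoXModel(config)  # RoPE frequencies rescale once inputs exceed the original max length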
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__) def _a ( _SCREAMING_SNAKE_CASE ) -> YolosConfig: snake_case_ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: snake_case_ = 192 snake_case_ = 768 snake_case_ = 12 snake_case_ = 3 snake_case_ = [800, 1_333] snake_case_ = False elif yolos_name == "yolos_s_dWr": snake_case_ = 330 snake_case_ = 14 snake_case_ = 6 snake_case_ = 1_320 elif "yolos_s" in yolos_name: snake_case_ = 384 snake_case_ = 1_536 snake_case_ = 12 snake_case_ = 6 elif "yolos_b" in yolos_name: snake_case_ = [800, 1_344] snake_case_ = 91 snake_case_ = """huggingface/label-files""" snake_case_ = """coco-detection-id2label.json""" snake_case_ = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) snake_case_ = {int(_lowercase ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} return config def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> Tuple: for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) snake_case_ = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict snake_case_ = in_proj_weight[: config.hidden_size, :] snake_case_ = in_proj_bias[: config.hidden_size] snake_case_ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case_ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case_ = in_proj_weight[-config.hidden_size :, :] snake_case_ = in_proj_bias[-config.hidden_size :] def _a ( _SCREAMING_SNAKE_CASE ) -> str: if "backbone" in name: snake_case_ = name.replace("""backbone""" , """vit""" ) if "cls_token" in name: snake_case_ = name.replace("""cls_token""" , """embeddings.cls_token""" ) if "det_token" in name: snake_case_ = name.replace("""det_token""" , """embeddings.detection_tokens""" ) if "mid_pos_embed" in name: snake_case_ = name.replace("""mid_pos_embed""" , """encoder.mid_position_embeddings""" ) if "pos_embed" in name: snake_case_ = name.replace("""pos_embed""" , """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: snake_case_ = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "blocks" in name: snake_case_ = name.replace("""blocks""" , """encoder.layer""" ) if "attn.proj" in name: snake_case_ = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: snake_case_ = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: snake_case_ = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: snake_case_ = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: snake_case_ = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: snake_case_ = name.replace("""mlp.fc2""" , """output.dense""" ) if "class_embed" in name: snake_case_ = name.replace("""class_embed""" , """class_labels_classifier""" ) if "bbox_embed" in name: snake_case_ = name.replace("""bbox_embed""" , """bbox_predictor""" ) 
if "vit.norm" in name: snake_case_ = name.replace("""vit.norm""" , """vit.layernorm""" ) return name def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> dict: for key in orig_state_dict.copy().keys(): snake_case_ = orig_state_dict.pop(_lowercase ) if "qkv" in key: snake_case_ = key.split(""".""" ) snake_case_ = int(key_split[2] ) snake_case_ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: snake_case_ = val[:dim, :] snake_case_ = val[ dim : dim * 2, : ] snake_case_ = val[-dim:, :] else: snake_case_ = val[:dim] snake_case_ = val[dim : dim * 2] snake_case_ = val[-dim:] else: snake_case_ = val return orig_state_dict def _a ( ) -> torch.Tensor: snake_case_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" snake_case_ = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im @torch.no_grad() def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ) -> Optional[int]: snake_case_ = get_yolos_config(_lowercase ) # load original state_dict snake_case_ = torch.load(_lowercase , map_location="""cpu""" )["""model"""] # load 🤗 model snake_case_ = YolosForObjectDetection(_lowercase ) model.eval() snake_case_ = convert_state_dict(_lowercase , _lowercase ) model.load_state_dict(_lowercase ) # Check outputs on an image, prepared by YolosImageProcessor snake_case_ = 800 if yolos_name != """yolos_ti""" else 512 snake_case_ = YolosImageProcessor(format="""coco_detection""" , size=_lowercase ) snake_case_ = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case_ = model(**_lowercase ) snake_case_ = outputs.logits, outputs.pred_boxes snake_case_ = None, None if yolos_name == "yolos_ti": snake_case_ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) snake_case_ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": snake_case_ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) snake_case_ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": snake_case_ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) snake_case_ = torch.tensor( [[0.7614, 0.2316, 0.4728], [0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": snake_case_ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) snake_case_ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": snake_case_ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) snake_case_ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f"""Unknown yolos_name: {yolos_name}""" ) assert torch.allclose(logits[0, :3, :3] , _lowercase , atol=1E-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , _lowercase , atol=1E-4 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(_lowercase ) print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(_lowercase ) if push_to_hub: 
snake_case_ = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) snake_case_ = model_mapping[yolos_name] image_processor.push_to_hub(_lowercase , organization="""hustvl""" ) model.push_to_hub(_lowercase , organization="""hustvl""" ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __SCREAMING_SNAKE_CASE : Tuple = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
347
def actual_power(a: int, b: int) -> int:
    """Compute a**|b| by recursive halving of the exponent."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    """Compute a**b, supporting negative exponents."""
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
265
0
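One performance note on the recursive power implementation above: it evaluates `actual_power(a, int(b / 2))` twice at every level, so it performs O(b) multiplications rather than the O(log b) that exponentiation by squaring normally gives. Caching the half-power restores the expected cost; a sketch for non-negative exponents, with names of my own:

def fast_power(a: int, b: int) -> int:
    # b is assumed non-negative here
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # compute the half-power once
    squared = half * half
    return a * squared if b % 2 else squared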
import copy import random from transformers import CLIPTokenizer class lowerCamelCase__ ( __lowercase): '''simple docstring''' def __init__( self :str , *a :Tuple , **a :str ) -> Dict: super().__init__(*a , **a ) __UpperCamelCase : int = {} def _lowerCamelCase ( self :Dict , a :Any , *a :Optional[int] , **a :Union[str, Any] ) -> int: __UpperCamelCase : List[Any] = super().add_tokens(a , *a , **a ) if num_added_tokens == 0: raise ValueError( f'The tokenizer already contains the token {placeholder_token}. Please pass a different' " `placeholder_token` that is not already in the tokenizer." ) def _lowerCamelCase ( self :Optional[Any] , a :str , *a :str , a :Dict=1 , **a :List[Any] ) -> List[str]: __UpperCamelCase : int = [] if num_vec_per_token == 1: self.try_adding_tokens(a , *a , **a ) output.append(a ) else: __UpperCamelCase : Optional[Any] = [] for i in range(a ): __UpperCamelCase : Tuple = placeholder_token + f'_{i}' self.try_adding_tokens(a , *a , **a ) output.append(a ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( f'The tokenizer already has placeholder token {token} that can get confused with' f' {placeholder_token}keep placeholder tokens independent' ) __UpperCamelCase : Tuple = output def _lowerCamelCase ( self :Optional[Any] , a :Tuple , a :List[str]=False , a :Tuple=1.0 ) -> Dict: if isinstance(a , a ): __UpperCamelCase : List[Any] = [] for i in range(len(a ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=a ) ) return output for placeholder_token in self.token_map: if placeholder_token in text: __UpperCamelCase : Any = self.token_map[placeholder_token] __UpperCamelCase : Dict = tokens[: 1 + int(len(a ) * prop_tokens_to_load )] if vector_shuffle: __UpperCamelCase : str = copy.copy(a ) random.shuffle(a ) __UpperCamelCase : List[Any] = text.replace(a , " ".join(a ) ) return text def __call__( self :Optional[int] , a :Optional[Any] , *a :str , a :Union[str, Any]=False , a :Optional[int]=1.0 , **a :Optional[Any] ) -> str: return super().__call__( self.replace_placeholder_tokens_in_text( a , vector_shuffle=a , prop_tokens_to_load=a ) , *a , **a , ) def _lowerCamelCase ( self :Tuple , a :Union[str, Any] , *a :Tuple , a :Tuple=False , a :Optional[Any]=1.0 , **a :Optional[int] ) -> Union[str, Any]: return super().encode( self.replace_placeholder_tokens_in_text( a , vector_shuffle=a , prop_tokens_to_load=a ) , *a , **a , )
232
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
265
0
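The multi-vector tokenizer earlier in this row boils down to one idea: a placeholder word is replaced by several numbered sub-tokens before encoding. A standalone sketch of that replacement step, using illustrative names rather than the class's actual API:

token_map = {"<cat-toy>": ["<cat-toy>_0", "<cat-toy>_1", "<cat-toy>_2"]}

def expand_placeholders(text: str) -> str:
    for placeholder, tokens in token_map.items():
        text = text.replace(placeholder, " ".join(tokens))
    return text

print(expand_placeholders("a photo of <cat-toy>"))
# -> a photo of <cat-toy>_0 <cat-toy>_1 <cat-toy>_2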
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return the sigmoid of `value`, or its derivative if `deriv` is True."""
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight towards `expected` and return the final output."""
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
227
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMInverseScheduler, DDIMScheduler, DPMSolverMultistepInverseScheduler, DPMSolverMultistepScheduler, StableDiffusionDiffEditPipeline, UNetaDConditionModel, ) from diffusers.utils import load_image, slow from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class UpperCamelCase_ ( __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = StableDiffusionDiffEditPipeline lowercase = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'height', 'width', 'image'} | {'image_latents'} lowercase = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'image'} | {'image_latents'} lowercase = frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess lowercase = frozenset([] ) def _lowercase( self ) -> Optional[int]: torch.manual_seed(0 ) UpperCAmelCase : Dict = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=A , ) UpperCAmelCase : int = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_one=A , ) UpperCAmelCase : List[Any] = DDIMInverseScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="""scaled_linear""" , clip_sample=A , set_alpha_to_zero=A , ) torch.manual_seed(0 ) UpperCAmelCase : int = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , ) torch.manual_seed(0 ) UpperCAmelCase : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , ) UpperCAmelCase : Optional[Any] = CLIPTextModel(A ) UpperCAmelCase : List[str] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """inverse_scheduler""": inverse_scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _lowercase( self , A , A=0 ) -> Optional[Any]: UpperCAmelCase : Any = floats_tensor((1, 16, 16) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Any = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(A ) ).to(A ) if str(A ).startswith("""mps""" ): UpperCAmelCase : List[Any] = torch.manual_seed(A ) else: UpperCAmelCase : List[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : int = { """prompt""": """a dog and a newt""", """mask_image""": mask, """image_latents""": latents, """generator""": generator, 
"""num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> Optional[int]: UpperCAmelCase : Optional[Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[Any] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : Any = { """image""": image, """source_prompt""": """a cat and a frog""", """target_prompt""": """a dog and a newt""", """generator""": generator, """num_inference_steps""": 2, """num_maps_per_mask""": 2, """mask_encode_strength""": 1.0, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _lowercase( self , A , A=0 ) -> str: UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(A ) ).to(A ) UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase : int = Image.fromarray(np.uinta(A ) ).convert("""RGB""" ) if str(A ).startswith("""mps""" ): UpperCAmelCase : Optional[int] = torch.manual_seed(A ) else: UpperCAmelCase : Optional[int] = torch.Generator(device=A ).manual_seed(A ) UpperCAmelCase : str = { """image""": image, """prompt""": """a cat and a frog""", """generator""": generator, """num_inference_steps""": 2, """inpaint_strength""": 1.0, """guidance_scale""": 6.0, """decode_latents""": True, """output_type""": """numpy""", } return inputs def _lowercase( self ) -> List[Any]: if not hasattr(self.pipeline_class , """_optional_components""" ): return UpperCAmelCase : Dict = self.get_dummy_components() UpperCAmelCase : int = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) # set all optional components to None and update pipeline config accordingly for optional_component in pipe._optional_components: setattr(A , A , A ) pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} ) UpperCAmelCase : Any = self.get_dummy_inputs(A ) UpperCAmelCase : Optional[Any] = pipe(**A )[0] with tempfile.TemporaryDirectory() as tmpdir: pipe.save_pretrained(A ) UpperCAmelCase : Dict = self.pipeline_class.from_pretrained(A ) pipe_loaded.to(A ) pipe_loaded.set_progress_bar_config(disable=A ) for optional_component in pipe._optional_components: self.assertTrue( getattr(A , A ) is None , f'''`{optional_component}` did not stay set to None after loading.''' , ) UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(A ) UpperCAmelCase : Tuple = pipe_loaded(**A )[0] UpperCAmelCase : Optional[Any] = np.abs(output - output_loaded ).max() self.assertLess(A , 1e-4 ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[int] = """cpu""" UpperCAmelCase : Optional[Any] = self.get_dummy_components() UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Union[str, Any] = self.get_dummy_mask_inputs(A ) UpperCAmelCase : List[Any] = pipe.generate_mask(**A ) UpperCAmelCase : Dict = mask[0, -3:, -3:] self.assertEqual(mask.shape , (1, 16, 16) ) UpperCAmelCase : Optional[int] = np.array([0] * 9 ) UpperCAmelCase : Union[str, Any] = np.abs(mask_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) self.assertEqual(mask[0, -3, -4] , 0 ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase 
: Optional[Any] = """cpu""" UpperCAmelCase : List[str] = self.get_dummy_components() UpperCAmelCase : Optional[Any] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : List[str] = pipe.invert(**A ).images UpperCAmelCase : Union[str, Any] = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Dict = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Dict = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) def _lowercase( self ) -> Dict: super().test_inference_batch_single_identical(expected_max_diff=5e-3 ) def _lowercase( self ) -> int: UpperCAmelCase : List[Any] = """cpu""" UpperCAmelCase : int = self.get_dummy_components() UpperCAmelCase : List[Any] = {"""beta_start""": 0.0_0_0_8_5, """beta_end""": 0.0_1_2, """beta_schedule""": """scaled_linear"""} UpperCAmelCase : int = DPMSolverMultistepScheduler(**A ) UpperCAmelCase : int = DPMSolverMultistepInverseScheduler(**A ) UpperCAmelCase : List[str] = self.pipeline_class(**A ) pipe.to(A ) pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = self.get_dummy_inversion_inputs(A ) UpperCAmelCase : Any = pipe.invert(**A ).images UpperCAmelCase : Dict = image[0, -1, -3:, -3:] self.assertEqual(image.shape , (2, 32, 32, 3) ) UpperCAmelCase : Any = np.array( [0.5_1_5_0, 0.5_1_3_4, 0.5_0_4_3, 0.5_3_7_6, 0.4_6_9_4, 0.5_1_0_5_0, 0.5_0_1_5, 0.4_4_0_7, 0.4_7_9_9] , ) UpperCAmelCase : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(A , 1e-3 ) @require_torch_gpu @slow class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: super().tearDown() gc.collect() torch.cuda.empty_cache() @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" ) UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) ) UpperCAmelCase : List[str] = raw_image def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Dict = torch.manual_seed(0 ) UpperCAmelCase : int = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Optional[int] = DDIMInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : Tuple = """a bowl of fruit""" UpperCAmelCase : List[Any] = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Tuple = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A ).latents UpperCAmelCase : Any = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0] UpperCAmelCase : List[str] = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1 def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = torch.manual_seed(0 ) UpperCAmelCase : 
Union[str, Any] = StableDiffusionDiffEditPipeline.from_pretrained( """stabilityai/stable-diffusion-2-1""" , safety_checker=A , torch_dtype=torch.floataa ) UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) UpperCAmelCase : Any = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config ) pipe.enable_model_cpu_offload() pipe.set_progress_bar_config(disable=A ) UpperCAmelCase : int = """a bowl of fruit""" UpperCAmelCase : int = """a bowl of pears""" UpperCAmelCase : str = pipe.generate_mask( image=self.raw_image , source_prompt=A , target_prompt=A , generator=A , ) UpperCAmelCase : Any = pipe.invert( prompt=A , image=self.raw_image , inpaint_strength=0.7 , generator=A , num_inference_steps=25 , ).latents UpperCAmelCase : str = pipe( prompt=A , mask_image=A , image_latents=A , generator=A , negative_prompt=A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0] UpperCAmelCase : Tuple = ( np.array( load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/diffedit/pears.png""" ).resize((768, 768) ) ) / 255 ) assert np.abs((expected_image - image).max() ) < 5e-1
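The slow tests above all follow the same three-stage DiffEdit recipe. Below is a minimal sketch of that flow outside the test harness, using the same checkpoint, image, and prompts as the tests; omitting a fixed generator and negative prompt are simplifications, and running it needs a CUDA GPU plus network access.

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline
from diffusers.utils import load_image

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
).convert("RGB").resize((768, 768))

# Stage 1: mask the region where the source and target prompts disagree.
mask = pipe.generate_mask(image=image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears")
# Stage 2: DDIM-invert the image into latents so the edit stays faithful to it.
latents = pipe.invert(prompt="a bowl of fruit", image=image, inpaint_strength=0.7).latents
# Stage 3: inpaint the masked region toward the target prompt.
edited = pipe(
    prompt="a bowl of pears", mask_image=mask, image_latents=latents, inpaint_strength=0.7
).images[0]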
265
0
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument lowercase = { """/attention/""": """/0/SelfAttention/""", """/self_attention/""": """/0/SelfAttention/""", """/encoder_decoder_attention/""": """/1/EncDecAttention/""", """value""": """v""", """query""": """q""", """key""": """k""", """out""": """o""", """pre_self_attention_layer_norm""": """0/layer_norm""", """pre_cross_attention_layer_norm""": """1/layer_norm""", """pre_attention_layer_norm""": """0/layer_norm""", # previously 1, but seems wrong """token_embedder""": """shared""", """encoder_norm""": """final_layer_norm""", """decoder_norm""": """final_layer_norm""", """relpos_bias/rel_embedding""": """block/0/layer/0/SelfAttention/relative_attention_bias/weight""", """router/router_weights/w/""": """router/classifier/""", """roer/roer_weights/w/""": """router/classifier/""", """logits_dense""": """lm_head""", } def __UpperCAmelCase ( a_): # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in # the original model snake_case_ = list(s_dict.keys()) for key in keys: snake_case_ = R""".*/layers_(\d+)""" snake_case_ = key if re.match(_lowercase , _lowercase): snake_case_ = re.sub(R'layers_(\d+)' , R'block/\1/layer' , _lowercase) snake_case_ = R"""(encoder|decoder)\/""" if re.match(_lowercase , _lowercase): snake_case_ = re.match(_lowercase , _lowercase).groups() if groups[0] == "encoder": snake_case_ = re.sub(R'/mlp/' , R'/1/mlp/' , _lowercase) snake_case_ = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , _lowercase) elif groups[0] == "decoder": snake_case_ = re.sub(R'/mlp/' , R'/2/mlp/' , _lowercase) snake_case_ = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , _lowercase) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: snake_case_ = new_key.replace(_lowercase , _lowercase) print(f'''{key} -> {new_key}''') snake_case_ = s_dict.pop(_lowercase) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case_ = s_dict[ """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: snake_case_ = s_dict[ """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys()): if "expert" in key: snake_case_ = s_dict[key].shape[0] snake_case_ = s_dict[key] for idx in range(_lowercase): snake_case_ = expert_weights[idx] print(f'''{key} -> {key.replace("expert/" , "nested fstring")}''') s_dict.pop(_lowercase) return s_dict lowercase = { """NUM_ENCODER_LAYERS""": """num_layers""", """NUM_DECODER_LAYERS""": """num_decoder_layers""", """NUM_HEADS""": """num_heads""", """HEAD_DIM""": """d_kv""", """EMBED_DIM""": """d_model""", """MLP_DIM""": """d_ff""", """NUM_SELECTED_EXPERTS""": """num_selected_experts""", """NUM_ENCODER_SPARSE_LAYERS""": """num_sparse_encoder_layers""", """NUM_DECODER_SPARSE_LAYERS""": """num_sparse_decoder_layers""", """dense.MlpBlock.activations""": """feed_forward_proj""", } def __UpperCAmelCase ( a_ , a_): # Convert a Google-style gin config to the Hugging Face format import regex as re with open(_lowercase , 'r') as f: snake_case_ = f.read() snake_case_ = re.findall(R'(.*) = ([0-9.]*)' , _lowercase) snake_case_ = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": snake_case_ = float(_lowercase) if """.""" in value else int(_lowercase) snake_case_ = re.findall(R'(.*activations) = \(\'(.*)\',\)' , _lowercase)[0] snake_case_ = str(activation[1]) snake_case_ = num_experts snake_case_ = SwitchTransformersConfig(**_lowercase) return config def __UpperCAmelCase ( a_ , a_ , a_=None , a_="./" , a_=8): # Initialise PyTorch model print(f'''Loading flax weights from : {flax_checkpoint_path}''') snake_case_ = checkpoints.load_tax_checkpoint(_lowercase) if gin_file is not None: snake_case_ = convert_gin_to_config(_lowercase , _lowercase) else: snake_case_ = SwitchTransformersConfig.from_pretrained(_lowercase) snake_case_ = SwitchTransformersForConditionalGeneration(_lowercase) snake_case_ = flax_params["""target"""] snake_case_ = flatten_dict(_lowercase , sep='/') snake_case_ = rename_keys(_lowercase) snake_case_ = unflatten_dict(_lowercase , sep='/') # Load the flax params in the PT model load_flax_weights_in_pytorch_model(_lowercase , _lowercase) print(f'''Save PyTorch model to {pytorch_dump_path}''') pt_model.save_pretrained(_lowercase) if __name__ == "__main__": lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( "--switch_t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint of the pre-trained SwitchTransformers model to convert.", ) parser.add_argument( "--gin_file", default=None, type=str, required=False, help="Path to the gin config file. If not provided, a `config_file` has to be passed ", ) parser.add_argument( "--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model." ) parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts") lowercase = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_t5x_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
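A small, self-contained illustration of step 1 of rename_keys() above: T5X flat parameter paths such as encoder/layers_3/... are rewritten into the block/<n>/layer layout that HF T5-style models use. The sample key is hypothetical.

import re

key = "encoder/layers_3/attention/query/kernel"
if re.match(r".*/layers_(\d+)", key):
    # layers_<n> -> block/<n>/layer, mirroring HF's block.{x}.layer.{y} naming
    key = re.sub(r"layers_(\d+)", r"block/\1/layer", key)
print(key)  # encoder/block/3/layer/attention/query/kernel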
178
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]: UpperCAmelCase : Dict = {} for old_key in state_dict.keys(): UpperCAmelCase : str = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" in key and "experts" not in key: UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" in key and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase : str = state_dict[old_key] return new_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple: UpperCAmelCase : Any = [] UpperCAmelCase : Dict = 0 os.makedirs(_lowercase , exist_ok=_lowercase ) for expert in range(_lowercase ): UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowercase ): UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : Optional[Any] = os.path.join( _lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) torch.save(_lowercase , _lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowercase )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy
model/experts saved on the same file) if len(_lowercase ) == 1: UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase ) torch.save(_lowercase , _lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowercase , _lowercase ) # Otherwise, let's build the index UpperCAmelCase : Optional[int] = {} for idx, shard in enumerate(_lowercase ): UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' ) UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) ) for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : Any = {"""total_size""": total_size} UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n""" f.write(_lowercase ) return metadata, index if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a : int = parser.parse_args() a , a : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) a : str = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
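To make the fairseq-to-HF key mapping above concrete, here is a hypothetical check of rename_fairseq_keys() on a few dummy entries; it assumes the (de-obfuscated) converter above is importable and that these fairseq key names are representative.

import torch

state = {
    "layers.3.moe_layer.experts.0.fc1.weight": torch.zeros(1),
    "layers.3.moe_layer.gate.wg.weight": torch.zeros(1),
    "layers.0.encoder_attn_layer_norm.weight": torch.zeros(1),
}
renamed = rename_fairseq_keys(state, 7)  # 7 = expert index for this shard
print(sorted(renamed))
# ['layers.0.cross_attention_layer_norm.weight',
#  'layers.3.ffn.experts.expert_7.fc1.weight',
#  'layers.3.ffn.router.classifier.weight']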
265
0
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets snake_case_ = """ @inproceedings{xu-etal-2016-optimizing, title = {Optimizing Statistical Machine Translation for Text Simplification}, authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris}, journal = {Transactions of the Association for Computational Linguistics}, volume = {4}, year={2016}, url = {https://www.aclweb.org/anthology/Q16-1029}, pages = {401--415 }, @inproceedings{post-2018-call, title = \"A Call for Clarity in Reporting {BLEU} Scores\", author = \"Post, Matt\", booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\", month = oct, year = \"2018\", address = \"Belgium, Brussels\", publisher = \"Association for Computational Linguistics\", url = \"https://www.aclweb.org/anthology/W18-6319\", pages = \"186--191\", } """ snake_case_ = """\ WIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU It can be used to evaluate the quality of machine-generated texts. """ snake_case_ = """ Calculates sari score (between 0 and 100) given a list of source and predicted sentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score. Args: sources: list of source sentences where each sentence should be a string. predictions: list of predicted sentences where each sentence should be a string. references: list of lists of reference sentences where each sentence should be a string. Returns: sari: sari score sacrebleu: sacrebleu score exact: exact score Examples: >>> sources=[\"About 95 species are currently accepted .\"] >>> predictions=[\"About 95 you now get in .\"] >>> references=[[\"About 95 species are currently known .\"]] >>> wiki_split = datasets.load_metric(\"wiki_split\") >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references) >>> print(results) {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0} """ def lowerCamelCase__ ( snake_case_ : int ) -> List[Any]: def remove_articles(snake_case_ : int ): __snake_case = re.compile(R'''\b(a|an|the)\b''' , re.UNICODE ) return re.sub(_lowercase , ''' ''' , _lowercase ) def white_space_fix(snake_case_ : Dict ): return " ".join(text.split() ) def remove_punc(snake_case_ : str ): __snake_case = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(snake_case_ : Optional[int] ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_lowercase ) ) ) ) def lowerCamelCase__ ( snake_case_ : Tuple , snake_case_ : Union[str, Any] ) -> List[Any]: return int(normalize_answer(_lowercase ) == normalize_answer(_lowercase ) ) def lowerCamelCase__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[int] ) -> Tuple: __snake_case = [any(compute_exact(_lowercase , _lowercase ) for ref in refs ) for pred, refs in zip(_lowercase , _lowercase )] return (sum(_lowercase ) / len(_lowercase )) * 100 def lowerCamelCase__ ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : Optional[Any] ) -> Dict: __snake_case = [rgram for rgrams in rgramslist for rgram in rgrams] __snake_case = Counter(_lowercase ) __snake_case = Counter(_lowercase ) __snake_case = Counter() for sgram, scount in sgramcounter.items(): __snake_case = scount * numref __snake_case = Counter(_lowercase ) __snake_case = Counter() for cgram, ccount in 
cgramcounter.items(): __snake_case = ccount * numref # KEEP __snake_case = sgramcounter_rep & cgramcounter_rep __snake_case = keepgramcounter_rep & rgramcounter __snake_case = sgramcounter_rep & rgramcounter __snake_case = 0 __snake_case = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __snake_case = 1 __snake_case = 1 if len(_lowercase ) > 0: __snake_case = keeptmpscorea / len(_lowercase ) if len(_lowercase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __snake_case = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __snake_case = 0 if keepscore_precision > 0 or keepscore_recall > 0: __snake_case = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __snake_case = sgramcounter_rep - cgramcounter_rep __snake_case = delgramcounter_rep - rgramcounter __snake_case = sgramcounter_rep - rgramcounter __snake_case = 0 __snake_case = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __snake_case = 1 if len(_lowercase ) > 0: __snake_case = deltmpscorea / len(_lowercase ) # ADDITION __snake_case = set(_lowercase ) - set(_lowercase ) __snake_case = set(_lowercase ) & set(_lowercase ) __snake_case = set(_lowercase ) - set(_lowercase ) __snake_case = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__snake_case = 1 __snake_case = 1 if len(_lowercase ) > 0: __snake_case = addtmpscore / len(_lowercase ) if len(_lowercase ) > 0: __snake_case = addtmpscore / len(_lowercase ) __snake_case = 0 if addscore_precision > 0 or addscore_recall > 0: __snake_case = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def lowerCamelCase__ ( snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Dict ) -> str: __snake_case = len(_lowercase ) __snake_case = ssent.split(''' ''' ) __snake_case = csent.split(''' ''' ) __snake_case = [] __snake_case = [] __snake_case = [] __snake_case = [] __snake_case = [] __snake_case = [] __snake_case = [] __snake_case = [] __snake_case = [] __snake_case = [] for rsent in rsents: __snake_case = rsent.split(''' ''' ) __snake_case = [] __snake_case = [] __snake_case = [] ragramslist.append(_lowercase ) for i in range(0 , len(_lowercase ) - 1 ): if i < len(_lowercase ) - 1: __snake_case = ragrams[i] + """ """ + ragrams[i + 1] ragrams.append(_lowercase ) if i < len(_lowercase ) - 2: __snake_case = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] ragrams.append(_lowercase ) if i < len(_lowercase ) - 3: __snake_case = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3] ragrams.append(_lowercase ) ragramslist.append(_lowercase ) ragramslist.append(_lowercase ) ragramslist.append(_lowercase ) for i in range(0 , len(_lowercase ) - 1 ): if i < len(_lowercase ) - 1: __snake_case = sagrams[i] + """ """ + sagrams[i + 1] sagrams.append(_lowercase ) if i < len(_lowercase ) - 2: __snake_case = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] sagrams.append(_lowercase ) if i < len(_lowercase ) - 3: __snake_case = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3] sagrams.append(_lowercase ) for i in range(0 , len(_lowercase ) - 1 ): if i < len(_lowercase ) - 1: __snake_case = cagrams[i] + """ """ + cagrams[i + 1] cagrams.append(_lowercase ) if i < len(_lowercase ) - 2: __snake_case = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] cagrams.append(_lowercase ) if i < len(_lowercase ) - 3: __snake_case = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3] cagrams.append(_lowercase ) (__snake_case) = SARIngram(_lowercase , _lowercase , _lowercase , _lowercase ) (__snake_case) = SARIngram(_lowercase , _lowercase , _lowercase , _lowercase ) (__snake_case) = SARIngram(_lowercase , _lowercase , _lowercase , _lowercase ) (__snake_case) = SARIngram(_lowercase , _lowercase , _lowercase , _lowercase ) __snake_case = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __snake_case = sum([delascore, delascore, delascore, delascore] ) / 4 __snake_case = sum([addascore, addascore, addascore, addascore] ) / 4 __snake_case = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def lowerCamelCase__ ( snake_case_ : Optional[int] , snake_case_ : str = True , snake_case_ : Dict = "13a" , snake_case_ : int = True ) -> Union[str, Any]: # Normalization is required for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset.
# [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: __snake_case = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __snake_case = sacrebleu.metrics.bleu._get_tokenizer(_lowercase )()(_lowercase ) else: __snake_case = sacrebleu.TOKENIZERS[tokenizer]()(_lowercase ) elif tokenizer == "moses": __snake_case = sacremoses.MosesTokenizer().tokenize(_lowercase , return_str=_lowercase , escape=_lowercase ) elif tokenizer == "penn": __snake_case = sacremoses.MosesTokenizer().penn_tokenize(_lowercase , return_str=_lowercase ) else: __snake_case = sentence if not return_str: __snake_case = normalized_sent.split() return normalized_sent def lowerCamelCase__ ( snake_case_ : int , snake_case_ : str , snake_case_ : Optional[Any] ) -> Any: if not (len(_lowercase ) == len(_lowercase ) == len(_lowercase )): raise ValueError('''Sources length must match predictions and references lengths.''' ) __snake_case = 0 for src, pred, refs in zip(_lowercase , _lowercase , _lowercase ): sari_score += SARIsent(normalize(_lowercase ) , normalize(_lowercase ) , [normalize(_lowercase ) for sent in refs] ) __snake_case = sari_score / len(_lowercase ) return 100 * sari_score def lowerCamelCase__ ( snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : List[Any]="exp" , snake_case_ : str=None , snake_case_ : str=False , snake_case_ : Tuple=False , snake_case_ : Optional[Any]=False , ) -> Tuple: __snake_case = len(references[0] ) if any(len(_lowercase ) != references_per_prediction for refs in references ): raise ValueError('''Sacrebleu requires the same number of references for each prediction''' ) __snake_case = [[refs[i] for refs in references] for i in range(_lowercase )] __snake_case = sacrebleu.corpus_bleu( _lowercase , _lowercase , smooth_method=_lowercase , smooth_value=_lowercase , force=_lowercase , lowercase=_lowercase , use_effective_order=_lowercase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def a (self : List[Any] ): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Sequence(datasets.Value('''string''' , id='''sequence''' ) , id='''references''' ), } ) , codebase_urls=[ '''https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py''', '''https://github.com/cocoxu/simplification/blob/master/SARI.py''', '''https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py''', '''https://github.com/mjpost/sacreBLEU''', ] , reference_urls=[ '''https://www.aclweb.org/anthology/Q16-1029.pdf''', '''https://github.com/mjpost/sacreBLEU''', '''https://en.wikipedia.org/wiki/BLEU''', '''https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213''', ] , ) def a (self : Dict , a__ : Any , a__ : str , a__ : Tuple ): """simple docstring""" __snake_case = {} result.update({'''sari''': compute_sari(sources=a__ , predictions=a__ , references=a__ )} ) result.update({'''sacrebleu''': compute_sacrebleu(predictions=a__ , references=a__ )} ) result.update({'''exact''': compute_em(predictions=a__ , references=a__ )} ) return result
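A usage sketch matching the docstring example above; it assumes the datasets, sacrebleu and sacremoses packages are installed.

import datasets

wiki_split = datasets.load_metric("wiki_split")
results = wiki_split.compute(
    sources=["About 95 species are currently accepted ."],
    predictions=["About 95 you now get in ."],
    references=[["About 95 species are currently known ."]],
)
print(results)  # {'sari': 21.805..., 'sacrebleu': 14.535..., 'exact': 0.0}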
24
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
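A quick, hypothetical check of the create_rename_keys() logic above on two sample checkpoint keys; the function only reads the keys, so None placeholders suffice, and it assumes the script's (de-obfuscated) helpers are importable.

sample = {"patch_embed.0.weight": None, "network.0.1.pwconv1.weight": None}
for old, new in create_rename_keys(sample):
    print(f"{old} -> {new}")
# patch_embed.0.weight -> swiftformer.patch_embed.patch_embedding.0.weight
# network.0.1.pwconv1.weight -> swiftformer.encoder.network.0.blocks.1.point_wise_conv1.weight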
265
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""ReformerTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = ["""ReformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase = [ """REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """ReformerAttention""", """ReformerForMaskedLM""", """ReformerForQuestionAnswering""", """ReformerForSequenceClassification""", """ReformerLayer""", """ReformerModel""", """ReformerModelWithLMHead""", """ReformerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys UpperCAmelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
256
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def __lowerCamelCase ( ) -> Any: raise RuntimeError("""CUDA out of memory.""" ) class UpperCamelCase_ ( nn.Module ): def __init__( self ) -> Any: super().__init__() UpperCAmelCase : Tuple = nn.Linear(3 , 4 ) UpperCAmelCase : Tuple = nn.BatchNormad(4 ) UpperCAmelCase : int = nn.Linear(4 , 5 ) def _lowercase( self , A ) -> Any: return self.lineara(self.batchnorm(self.lineara(A ) ) ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[int] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(A , [128, 64, 32, 16, 8] ) def _lowercase( self ) -> Any: UpperCAmelCase : Optional[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga UpperCAmelCase , UpperCAmelCase : Optional[int] = mock_training_loop_function("""hello""" ) self.assertListEqual(A , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, """hello"""] ) def _lowercase( self ) -> Any: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(A ): pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[int]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A , A ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(A ) as cm: mock_training_loop_function(128 , """hello""" , """world""" ) self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] ) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] ) def _lowercase( self ) -> int: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] ) @require_cuda def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = torch.cuda.memory_allocated() UpperCAmelCase : List[str] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , A ) UpperCAmelCase : Tuple = release_memory(A ) self.assertEqual(torch.cuda.memory_allocated() , A )
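A minimal usage sketch of the decorator exercised by these tests: it re-runs the wrapped function, halving batch_size after every error that looks like an out-of-memory failure (the fake OOM below mirrors the tests' raise_fake_out_of_memory).

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    if batch_size > 16:  # stand-in for a real CUDA OOM
        raise RuntimeError("CUDA out of memory.")
    return batch_size

print(train())  # 16 -- the first size that runs without raising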
265
0
from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES _UpperCAmelCase : Any =logging.get_logger(__name__) _UpperCAmelCase : Tuple =OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) _UpperCAmelCase : int =OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) _UpperCAmelCase : Optional[Any] =OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) _UpperCAmelCase : Tuple =OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), 
("""marian""", """FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) _UpperCAmelCase : Optional[Any] =OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) _UpperCAmelCase : Optional[int] =OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) _UpperCAmelCase : Any =OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) _UpperCAmelCase : Any =OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) _UpperCAmelCase : str =OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) _UpperCAmelCase : Optional[int] =OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) _UpperCAmelCase : Any =OrderedDict( [ # Model 
for Multiple Choice mapping ("""albert""", """FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) _UpperCAmelCase : Any =OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) _UpperCAmelCase : Union[str, Any] =OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) _UpperCAmelCase : int =OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) _UpperCAmelCase : List[str] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) _UpperCAmelCase : int =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) _UpperCAmelCase : List[str] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) _UpperCAmelCase : str =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) _UpperCAmelCase : Optional[int] =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) _UpperCAmelCase : Optional[Any] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) _UpperCAmelCase : List[str] =_LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) _UpperCAmelCase : int =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) _UpperCAmelCase : Dict =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) _UpperCAmelCase : Optional[Any] =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) _UpperCAmelCase : List[str] =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) _UpperCAmelCase : int =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) _UpperCAmelCase : List[str] =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) _UpperCAmelCase : Optional[int] =_LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = FLAX_MODEL_MAPPING _UpperCAmelCase : Any =auto_class_update(FlaxAutoModel) class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = FLAX_MODEL_FOR_PRETRAINING_MAPPING _UpperCAmelCase : Dict =auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING _UpperCAmelCase : str =auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = FLAX_MODEL_FOR_MASKED_LM_MAPPING _UpperCAmelCase : Optional[Any] =auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = 
FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING _UpperCAmelCase : str =auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING _UpperCAmelCase : Dict =auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING _UpperCAmelCase : Any =auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING _UpperCAmelCase : str =auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING _UpperCAmelCase : List[str] =auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING _UpperCAmelCase : Dict =auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING _UpperCAmelCase : Dict =auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING _UpperCAmelCase : Tuple =auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class snake_case__( _BaseAutoModelClass ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING _UpperCAmelCase : Union[str, Any] =auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
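Usage sketch for the auto classes registered above: from_pretrained resolves the config type through the mapping and instantiates the matching Flax class (needs flax installed and network access; the checkpoint id is illustrative).

from transformers import FlaxAutoModel

model = FlaxAutoModel.from_pretrained("bert-base-cased")
print(type(model).__name__)  # FlaxBertModel, picked via FLAX_MODEL_MAPPING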
262
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a : Optional[int] = { """configuration_git""": ["""GIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GitConfig""", """GitVisionConfig"""], """processing_git""": ["""GitProcessor"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a : Tuple = [ """GIT_PRETRAINED_MODEL_ARCHIVE_LIST""", """GitForCausalLM""", """GitModel""", """GitPreTrainedModel""", """GitVisionModel""", ] if TYPE_CHECKING: from .configuration_git import GIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GitConfig, GitVisionConfig from .processing_git import GitProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_git import ( GIT_PRETRAINED_MODEL_ARCHIVE_LIST, GitForCausalLM, GitModel, GitPreTrainedModel, GitVisionModel, ) else: import sys a : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
265
0
import math def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ): '''simple docstring''' _UpperCAmelCase = len(_lowercase ) _UpperCAmelCase = int(math.floor(math.sqrt(_lowercase ) ) ) _UpperCAmelCase = 0 while arr[min(_lowercase , _lowercase ) - 1] < x: _UpperCAmelCase = step step += int(math.floor(math.sqrt(_lowercase ) ) ) if prev >= n: return -1 while arr[prev] < x: _UpperCAmelCase = prev + 1 if prev == min(_lowercase , _lowercase ): return -1 if arr[prev] == x: return prev return -1 if __name__ == "__main__": lowercase_ : Optional[int] = input('Enter numbers separated by a comma:\n').strip() lowercase_ : Tuple = [int(item) for item in user_input.split(',')] lowercase_ : List[Any] = int(input('Enter the number to be searched:\n')) lowercase_ : Tuple = jump_search(arr, x) if res == -1: print('Number not found!') else: print(f"""Number {x} is at index {res}""")
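A quick sanity check of the jump search above (assuming the de-obfuscated jump_search is in scope): with n = 7 the block size is floor(sqrt(7)) = 2, so the probe sequence is indices 1, 3, 5 followed by a linear scan inside the candidate block.

arr = [0, 1, 3, 5, 7, 9, 11]
assert jump_search(arr, 7) == 4   # found inside the third block
assert jump_search(arr, 8) == -1  # absent values return -1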
133
'''simple docstring''' import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = LongformerTokenizer lowercase = True lowercase = LongformerTokenizerFast lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase : List[str] = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] UpperCAmelCase : int = dict(zip(A , range(len(A ) ) ) ) UpperCAmelCase : Any = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(A ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(A ) ) def _lowercase( self , **A ) -> Optional[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , **A ) -> int: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **A ) def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : Optional[Any] = """lower newer""" UpperCAmelCase : Optional[int] = """lower newer""" return input_text, output_text def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Tuple = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map ) UpperCAmelCase : Dict = """lower newer""" UpperCAmelCase : int = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""] UpperCAmelCase : Tuple = tokenizer.tokenize(A ) # , add_prefix_space=True) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokens + [tokenizer.unk_token] UpperCAmelCase : Tuple = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , A ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = self.get_tokenizer() self.assertListEqual(tokenizer.encode("""Hello world!""" , add_special_tokens=A ) , [0, 31414, 232, 328, 2] ) self.assertListEqual( tokenizer.encode("""Hello world! 
cécé herlolip 418""" , add_special_tokens=A ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , ) @slow def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Any = self.tokenizer_class.from_pretrained("""allenai/longformer-base-4096""" ) UpperCAmelCase : List[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=A ) UpperCAmelCase : Optional[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : List[str] = tokenizer.encode( """sequence builders""" , """multi-sequence build""" , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : Any = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = self.get_tokenizer() UpperCAmelCase : List[Any] = """Encode this sequence.""" UpperCAmelCase : List[str] = tokenizer.byte_encoder[""" """.encode("""utf-8""" )[0]] # Testing encoder arguments UpperCAmelCase : Union[str, Any] = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertNotEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A , add_special_tokens=A , add_prefix_space=A ) UpperCAmelCase : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0] self.assertEqual(A , A ) tokenizer.add_special_tokens({"""bos_token""": """<s>"""} ) UpperCAmelCase : int = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(encoded[1] )[0] self.assertNotEqual(A , A ) # Testing spaces after special tokens UpperCAmelCase : Union[str, Any] = """<mask>""" tokenizer.add_special_tokens( {"""mask_token""": AddedToken(A , lstrip=A , rstrip=A )} ) # mask token has a left space UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) UpperCAmelCase : Union[str, Any] = """Encode <mask> sequence""" UpperCAmelCase : Union[str, Any] = """Encode <mask>sequence""" UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Union[str, Any] = encoded.index(A ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertEqual(A , A ) UpperCAmelCase : Tuple = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = encoded.index(A ) UpperCAmelCase : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0] self.assertNotEqual(A , A ) def _lowercase( self ) -> Optional[int]: pass def _lowercase( self ) -> Any: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : str = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : int = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Dict = """A, <mask> AllenNLP sentence.""" UpperCAmelCase : Dict = tokenizer_r.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) UpperCAmelCase : Tuple = tokenizer_p.encode_plus(A , add_special_tokens=A , return_token_type_ids=A ) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( 
sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) UpperCAmelCase : int = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( A , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) def _lowercase( self ) -> List[Any]: for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ): UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( self.tmpdirname , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() ) UpperCAmelCase : Optional[int] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() ) self.assertEqual(pre_tokenizer_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""add_prefix_space"""] , A ) self.assertEqual(post_processor_state["""trim_offsets"""] , A ) def _lowercase( self ) -> Optional[Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` UpperCAmelCase : int = f'''{text_of_1_token} {text_of_1_token}''' UpperCAmelCase : List[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Dict = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ) + 1, len(A ) + 1 + len(A )) , ) UpperCAmelCase : int = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : List[Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (len(A ), len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[Any] = f''' 
{text}''' # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) UpperCAmelCase : Any = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : str = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ) + 1, 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Union[str, Any] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , ) UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained( A , use_fast=A , add_prefix_space=A , trim_offsets=A ) UpperCAmelCase : Optional[int] = tokenizer_r(A , return_offsets_mapping=A , add_special_tokens=A ) self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(A )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(A ), 1 + len(A ) + 1 + len(A )) , )
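The offset and token assertions above all hinge on how the byte-level BPE pre-tokenizer treats a leading space. Below is a minimal sketch of what `add_prefix_space` changes, assuming only the `tokenizers` package is installed (no pretrained checkpoint is needed):

```python
# Minimal sketch: how ByteLevel's add_prefix_space flag changes pre-tokenization.
from tokenizers import pre_tokenizers

with_prefix = pre_tokenizers.ByteLevel(add_prefix_space=True)
without_prefix = pre_tokenizers.ByteLevel(add_prefix_space=False)

# With the flag, the first word is treated as if a space preceded it and
# comes out as 'Ġhello'; without it, only later words carry the 'Ġ' marker.
print(with_prefix.pre_tokenize_str("hello hello"))
print(without_prefix.pre_tokenize_str("hello hello"))
```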
265
0
'''simple docstring''' import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCamelCase__ = { """text_branch""": """text_model""", """audio_branch""": """audio_model.audio_encoder""", """attn""": """attention.self""", """self.proj""": """output.dense""", """attention.self_mask""": """attn_mask""", """mlp.fc1""": """intermediate.dense""", """mlp.fc2""": """output.dense""", """norm1""": """layernorm_before""", """norm2""": """layernorm_after""", """bn0""": """batch_norm""", } lowerCamelCase__ = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc') def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase=False ): _UpperCAmelCase : Dict = create_model( "HTSAT-tiny" , "roberta" , _lowercase , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=_lowercase , fusion_type="aff_2d" if enable_fusion else None , ) return model, model_cfg def __lowerCAmelCase (__lowerCAmelCase ): _UpperCAmelCase : str = {} _UpperCAmelCase : Dict = R""".*sequential.(\d+).*""" _UpperCAmelCase : Union[str, Any] = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _UpperCAmelCase : str = key.replace(_lowercase , _lowercase ) if re.match(_lowercase , _lowercase ): # replace sequential layers with list _UpperCAmelCase : Optional[int] = re.match(_lowercase , _lowercase ).group(1 ) _UpperCAmelCase : Optional[Any] = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(_lowercase )//3}.linear.""" ) elif re.match(_lowercase , _lowercase ): _UpperCAmelCase : Optional[Any] = int(re.match(_lowercase , _lowercase ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_UpperCAmelCase : List[Any] = 1 if projection_layer == 0 else 2 _UpperCAmelCase : str = key.replace(F"""_projection.{projection_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" in key and "qkv" in key: # split qkv into query, key and value _UpperCAmelCase : Any = value _UpperCAmelCase : Tuple = mixed_qkv.size(0 ) // 3 _UpperCAmelCase : Any = mixed_qkv[:qkv_dim] _UpperCAmelCase : List[str] = mixed_qkv[qkv_dim : qkv_dim * 2] _UpperCAmelCase : Dict = mixed_qkv[qkv_dim * 2 :] _UpperCAmelCase : Optional[Any] = query_layer _UpperCAmelCase : str = key_layer _UpperCAmelCase : str = value_layer else: _UpperCAmelCase : Any = value return model_state_dict def __lowerCAmelCase (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ): _UpperCAmelCase : Any = init_clap(_lowercase , enable_fusion=_lowercase ) clap_model.eval() _UpperCAmelCase : Optional[int] = clap_model.state_dict() _UpperCAmelCase : Tuple = rename_state_dict(_lowercase ) _UpperCAmelCase : str = ClapConfig() _UpperCAmelCase : int = enable_fusion _UpperCAmelCase : List[str] = ClapModel(_lowercase ) # ignore the spectrogram embedding layer model.load_state_dict(_lowercase , strict=_lowercase ) model.save_pretrained(_lowercase ) transformers_config.save_pretrained(_lowercase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not') lowerCamelCase__ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
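The rename step above slices each fused attention projection into three equal chunks along the first dimension. A self-contained sketch of that split, using a random stand-in tensor rather than real CLAP weights:

```python
# Hedged sketch of the fused-qkv split: a combined projection of shape
# (3 * dim, ...) is cut into equal query / key / value chunks along dim 0.
import torch

mixed_qkv = torch.randn(3 * 8, 8)  # stand-in for a fused qkv weight
qkv_dim = mixed_qkv.size(0) // 3

query_layer = mixed_qkv[:qkv_dim]
key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
value_layer = mixed_qkv[qkv_dim * 2 :]

assert query_layer.shape == key_layer.shape == value_layer.shape == (8, 8)
```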
234
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, Pipeline, ZeroShotClassificationPipeline, pipeline, ) from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow from .test_pipelines_common import ANY # These 2 model types require different inputs than those of the usual text models. a : Tuple = {"""LayoutLMv2Config""", """LayoutLMv3Config"""} @is_pipeline_test class UpperCamelCase_ ( unittest.TestCase ): lowercase = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowercase = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING if model_mapping is not None: lowercase = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP} if tf_model_mapping is not None: lowercase = { config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP } def _lowercase( self , A , A , A ) -> Dict: UpperCAmelCase : Union[str, Any] = ZeroShotClassificationPipeline( model=A , tokenizer=A , candidate_labels=["""polics""", """health"""] ) return classifier, ["Who are you voting for in 2020?", "My stomach hurts."] def _lowercase( self , A , A ) -> Optional[int]: UpperCAmelCase : Dict = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # No kwarg UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , ["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : str = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) UpperCAmelCase : List[Any] = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Optional[int] = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] ) self.assertEqual( A , {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} ) self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 ) UpperCAmelCase : Dict = classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" ) self.assertEqual(A , {"""sequence""": ANY(A ), """labels""": [ANY(A )], """scores""": [ANY(A )]} ) # https://github.com/huggingface/transformers/issues/13846 UpperCAmelCase : str = classifier(["""I am happy"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(1 ) ] , ) UpperCAmelCase : List[str] = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] ) self.assertEqual( A , [ {"""sequence""": ANY(A ), """labels""": [ANY(A ), ANY(A )], """scores""": [ANY(A ), ANY(A )]} for i in range(2 ) ] , ) with self.assertRaises(A ): classifier("""""" , candidate_labels="""politics""" ) with self.assertRaises(A ): classifier(A , candidate_labels="""politics""" ) with self.assertRaises(A ): 
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" ) with self.assertRaises(A ): classifier("""Who are you voting for in 2020?""" , candidate_labels=A ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , ) with self.assertRaises(A ): classifier( """Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=A , ) self.run_entailment_id(A ) def _lowercase( self , A ) -> Any: UpperCAmelCase : Tuple = zero_shot_classifier.model.config UpperCAmelCase : Union[str, Any] = config.labelaid UpperCAmelCase : Tuple = zero_shot_classifier.entailment_id UpperCAmelCase : Any = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2} self.assertEqual(zero_shot_classifier.entailment_id , -1 ) UpperCAmelCase : Optional[Any] = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : Any = {"""ENTAIL""": 0, """NON-ENTAIL""": 1} self.assertEqual(zero_shot_classifier.entailment_id , 0 ) UpperCAmelCase : List[str] = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0} self.assertEqual(zero_shot_classifier.entailment_id , 2 ) UpperCAmelCase : Tuple = original_labelaid self.assertEqual(A , zero_shot_classifier.entailment_id ) @require_torch def _lowercase( self ) -> str: UpperCAmelCase : int = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) # There was a regression in 4.10 for this # Adding a test so we don't make the mistake again. # https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499 zero_shot_classifier( """Who are you voting for in 2020?""" * 100 , candidate_labels=["""politics""", """public health""", """science"""] ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , ) UpperCAmelCase : Union[str, Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @require_tf def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = pipeline( """zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , ) UpperCAmelCase : List[Any] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""science""", """public health""", """politics"""], """scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3], } , ) @slow @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[int] = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" ) UpperCAmelCase : Optional[int] = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": 
["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : str = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , ) @slow @require_tf def _lowercase( self ) -> List[str]: UpperCAmelCase : int = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" ) UpperCAmelCase : Tuple = zero_shot_classifier( """Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] ) self.assertEqual( nested_simplify(A ) , { """sequence""": """Who are you voting for in 2020?""", """labels""": ["""politics""", """public health""", """science"""], """scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9], } , ) UpperCAmelCase : Any = zero_shot_classifier( """The dominant sequence transduction models are based on complex recurrent or convolutional neural networks""" """ in an encoder-decoder configuration. The best performing models also connect the encoder and decoder""" """ through an attention mechanism. We propose a new simple network architecture, the Transformer, based""" """ solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two""" """ machine translation tasks show these models to be superior in quality while being more parallelizable""" """ and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014""" """ English-to-German translation task, improving over the existing best results, including ensembles by""" """ over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new""" """ single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small""" """ fraction of the training costs of the best models from the literature. We show that the Transformer""" """ generalizes well to other tasks by applying it successfully to English constituency parsing both with""" """ large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=A , ) self.assertEqual( nested_simplify(A ) , { """sequence""": ( """The dominant sequence transduction models are based on complex recurrent or convolutional neural""" """ networks in an encoder-decoder configuration. The best performing models also connect the""" """ encoder and decoder through an attention mechanism. We propose a new simple network""" """ architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence""" """ and convolutions entirely. Experiments on two machine translation tasks show these models to be""" """ superior in quality while being more parallelizable and requiring significantly less time to""" """ train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,""" """ improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014""" """ English-to-French translation task, our model establishes a new single-model state-of-the-art""" """ BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training""" """ costs of the best models from the literature. 
We show that the Transformer generalizes well to""" """ other tasks by applying it successfully to English constituency parsing both with large and""" """ limited training data.""" ), """labels""": ["""translation""", """machine learning""", """vision""", """statistics"""], """scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8], } , )
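Under the hood, these zero-shot classifiers pose each candidate label as an NLI hypothesis against the input and softmax the entailment logits across labels. A rough sketch of those mechanics, with made-up logits standing in for a real model's outputs:

```python
# Hedged sketch of NLI-based zero-shot classification: each label is slotted
# into a hypothesis template; entailment logits are softmaxed across labels.
import math

premise = "Who are you voting for in 2020?"
labels = ["politics", "public health", "science"]
template = "This example is {}."
pairs = [(premise, template.format(label)) for label in labels]

entailment_logits = [2.0, 0.5, -1.0]  # stand-ins, not real model outputs
z = sum(math.exp(x) for x in entailment_logits)
scores = [math.exp(x) / z for x in entailment_logits]
print(sorted(zip(labels, scores), key=lambda item: -item[1]))
```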
265
0
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_roberta import RobertaTokenizer A_ : Dict = logging.get_logger(__name__) A_ : str = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""} A_ : int = { """vocab_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/vocab.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/vocab.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/vocab.json""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json""" ), }, """merges_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/merges.txt""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/merges.txt""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/merges.txt""", """roberta-base-openai-detector""": """https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt""", """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt""" ), }, """tokenizer_file""": { """roberta-base""": """https://huggingface.co/roberta-base/resolve/main/tokenizer.json""", """roberta-large""": """https://huggingface.co/roberta-large/resolve/main/tokenizer.json""", """roberta-large-mnli""": """https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json""", """distilroberta-base""": """https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json""", """roberta-base-openai-detector""": ( """https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json""" ), """roberta-large-openai-detector""": ( """https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json""" ), }, } A_ : Optional[int] = { """roberta-base""": 512, """roberta-large""": 512, """roberta-large-mnli""": 512, """distilroberta-base""": 512, """roberta-base-openai-detector""": 512, """roberta-large-openai-detector""": 512, } class lowerCamelCase (A__ ): lowerCamelCase__ : Union[str, Any] = VOCAB_FILES_NAMES lowerCamelCase__ : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCamelCase__ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase__ : Any = ['input_ids', 'attention_mask'] lowerCamelCase__ : Tuple = RobertaTokenizer def __init__( self : Optional[int] , __UpperCAmelCase : Tuple=None , __UpperCAmelCase : List[Any]=None , __UpperCAmelCase : int=None , __UpperCAmelCase : List[str]="replace" , __UpperCAmelCase : Optional[int]="<s>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : Union[str, Any]="</s>" , __UpperCAmelCase : Dict="<s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : Tuple="<pad>" , __UpperCAmelCase : List[str]="<mask>" , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Any=True , **__UpperCAmelCase : Dict , ) -> List[Any]: 
super().__init__( __UpperCAmelCase , __UpperCAmelCase , tokenizer_file=__UpperCAmelCase , errors=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , add_prefix_space=__UpperCAmelCase , trim_offsets=__UpperCAmelCase , **__UpperCAmelCase , ) SCREAMING_SNAKE_CASE__ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , __UpperCAmelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE__ = getattr(__UpperCAmelCase , pre_tok_state.pop("""type""" ) ) SCREAMING_SNAKE_CASE__ = add_prefix_space SCREAMING_SNAKE_CASE__ = pre_tok_class(**__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = add_prefix_space SCREAMING_SNAKE_CASE__ = """post_processor""" SCREAMING_SNAKE_CASE__ = getattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase ) if tokenizer_component_instance: SCREAMING_SNAKE_CASE__ = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: SCREAMING_SNAKE_CASE__ = tuple(state["""sep"""] ) if "cls" in state: SCREAMING_SNAKE_CASE__ = tuple(state["""cls"""] ) SCREAMING_SNAKE_CASE__ = False if state.get("""add_prefix_space""" , __UpperCAmelCase ) != add_prefix_space: SCREAMING_SNAKE_CASE__ = add_prefix_space SCREAMING_SNAKE_CASE__ = True if state.get("""trim_offsets""" , __UpperCAmelCase ) != trim_offsets: SCREAMING_SNAKE_CASE__ = trim_offsets SCREAMING_SNAKE_CASE__ = True if changes_to_apply: SCREAMING_SNAKE_CASE__ = getattr(__UpperCAmelCase , state.pop("""type""" ) ) SCREAMING_SNAKE_CASE__ = component_class(**__UpperCAmelCase ) setattr(self.backend_tokenizer , __UpperCAmelCase , __UpperCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str: if self._mask_token is None: if self.verbose: logger.error("""Using mask_token, but it is not set yet.""" ) return None return str(self._mask_token ) @mask_token.setter def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : Optional[int] ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else value SCREAMING_SNAKE_CASE__ = value def SCREAMING_SNAKE_CASE ( self : str , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Tuple ) -> BatchEncoding: SCREAMING_SNAKE_CASE__ = kwargs.get("""is_split_into_words""" , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Optional[Any] ) -> BatchEncoding: SCREAMING_SNAKE_CASE__ = kwargs.get("""is_split_into_words""" , __UpperCAmelCase ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._encode_plus(*__UpperCAmelCase , **__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : List[str] , __UpperCAmelCase : Optional[int] = None ) -> Tuple[str]: SCREAMING_SNAKE_CASE__ = self._tokenizer.model.save(__UpperCAmelCase , name=__UpperCAmelCase ) return tuple(__UpperCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Any=None ) -> Optional[Any]: SCREAMING_SNAKE_CASE__ = [self.bos_token_id] + token_ids_a + [self.eos_token_id] if token_ids_a is None: return output return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id] def SCREAMING_SNAKE_CASE ( self : Optional[Any] , __UpperCAmelCase : List[str] , __UpperCAmelCase : str = None ) -> List[int]: SCREAMING_SNAKE_CASE__ = [self.sep_token_id] SCREAMING_SNAKE_CASE__ = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
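The `build_inputs_with_special_tokens` logic above implements RoBERTa's layout: `<s> a </s>` for a single sequence and `<s> a </s></s> b </s>` for a pair. A tiny illustration with placeholder ids (0 for `<s>`, 2 for `</s>`):

```python
# Sketch of the RoBERTa special-token layout; 0 and 2 are placeholder ids.
def build_inputs(bos, eos, ids_a, ids_b=None):
    output = [bos] + ids_a + [eos]
    if ids_b is None:
        return output
    return output + [eos] + ids_b + [eos]

print(build_inputs(0, 2, [10, 11]))        # [0, 10, 11, 2]
print(build_inputs(0, 2, [10, 11], [12]))  # [0, 10, 11, 2, 2, 12, 2]
```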
165
'''simple docstring''' import time from contextlib import contextmanager from pathlib import Path import pytest import requests from huggingface_hub.hf_api import HfApi, HfFolder a : List[Any] = """__DUMMY_TRANSFORMERS_USER__""" a : Tuple = """Dummy User""" a : Optional[Any] = """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""" a : Optional[Any] = """https://hub-ci.huggingface.co""" a : List[Any] = CI_HUB_ENDPOINT + """/datasets/{repo_id}/resolve/{revision}/{path}""" a : Tuple = CI_HUB_ENDPOINT + """/{repo_id}/resolve/{revision}/{filename}""" a : str = Path("""~/.huggingface/hub_ci_token""").expanduser() @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Optional[int]: monkeypatch.setattr( """huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , _lowercase ) monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , _lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase , _lowercase ) -> Optional[Any]: HfFolder.save_token(_lowercase ) yield HfFolder.delete_token() @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( ) -> str: return HfApi(endpoint=_lowercase ) @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : str = HfFolder.get_token() HfFolder.save_token(_lowercase ) yield CI_HUB_USER_TOKEN if previous_token is not None: HfFolder.save_token(_lowercase ) @pytest.fixture def __lowerCamelCase ( _lowercase ) -> Any: def _cleanup_repo(_lowercase ): hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) return _cleanup_repo @pytest.fixture def __lowerCamelCase ( _lowercase ) -> List[str]: @contextmanager def _temporary_repo(_lowercase ): try: yield repo_id finally: cleanup_repo(_lowercase ) return _temporary_repo @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: UpperCAmelCase : str = F'''repo_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[Any] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data/text_data.txt""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[int] = F'''repo_zipped_txt_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : Optional[int] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and 
token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[str]: return hf_private_dataset_repo_zipped_txt_data_ @pytest.fixture(scope="""session""" ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Tuple: UpperCAmelCase : List[Any] = F'''repo_zipped_img_data-{int(time.time() * 10e3 )}''' UpperCAmelCase : List[str] = F'''{CI_HUB_USER}/{repo_name}''' hf_api.create_repo(_lowercase , token=_lowercase , repo_type="""dataset""" , private=_lowercase ) hf_api.upload_file( token=_lowercase , path_or_fileobj=str(_lowercase ) , path_in_repo="""data.zip""" , repo_id=_lowercase , repo_type="""dataset""" , ) yield repo_id try: hf_api.delete_repo(_lowercase , token=_lowercase , repo_type="""dataset""" ) except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error pass @pytest.fixture() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> List[Any]: return hf_private_dataset_repo_zipped_img_data_
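All of these fixtures share one shape: create a hub repo, yield its id, and delete it afterwards even when the test body fails. A generic sketch of that pattern, with plain dictionaries standing in for the `HfApi` create/delete calls:

```python
# Generic sketch of the temporary-resource pattern: create, yield, and
# always clean up, even if the block using the resource raises.
from contextlib import contextmanager

@contextmanager
def temporary_repo(repo_id):
    created = {"id": repo_id}   # stand-in for hf_api.create_repo(...)
    try:
        yield created["id"]
    finally:
        created.clear()         # stand-in for hf_api.delete_repo(...)

with temporary_repo("user/repo_txt_data") as repo_id:
    print("using", repo_id)
# cleanup has already run here, success or failure
```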
265
0
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class lowercase__: """simple docstring""" def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_3 , SCREAMING_SNAKE_CASE_ : Dict=1_0 , SCREAMING_SNAKE_CASE_ : List[str]=3 , SCREAMING_SNAKE_CASE_ : List[Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[int]=2 , SCREAMING_SNAKE_CASE_ : Dict=2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=True , SCREAMING_SNAKE_CASE_ : Dict=True , SCREAMING_SNAKE_CASE_ : List[Any]=3_2 , SCREAMING_SNAKE_CASE_ : Optional[int]=5 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=4 , SCREAMING_SNAKE_CASE_ : List[Any]=3_7 , SCREAMING_SNAKE_CASE_ : Dict="gelu" , SCREAMING_SNAKE_CASE_ : str=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : str=1_0 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.02 , SCREAMING_SNAKE_CASE_ : Any=0.9 , SCREAMING_SNAKE_CASE_ : List[str]=None , ) -> Optional[Any]: lowercase_ = parent lowercase_ = batch_size lowercase_ = image_size lowercase_ = num_channels lowercase_ = patch_size lowercase_ = tubelet_size lowercase_ = num_frames lowercase_ = is_training lowercase_ = use_labels lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = intermediate_size lowercase_ = hidden_act lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = type_sequence_label_size lowercase_ = initializer_range lowercase_ = mask_ratio lowercase_ = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame lowercase_ = (image_size // patch_size) ** 2 lowercase_ = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos lowercase_ = int(mask_ratio * self.seq_length ) def _lowercase ( self : List[Any] ) -> List[Any]: lowercase_ = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase_ = None if self.use_labels: lowercase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase_ = self.get_config() return config, pixel_values, labels def _lowercase ( self : List[str] ) -> Optional[Any]: return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , 
is_decoder=SCREAMING_SNAKE_CASE_ , initializer_range=self.initializer_range , ) def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Optional[Any]: lowercase_ = VideoMAEModel(config=SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() lowercase_ = model(SCREAMING_SNAKE_CASE_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[Any] ) -> Optional[int]: lowercase_ = VideoMAEForPreTraining(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase_ = torch.ones((self.num_masks,) ) lowercase_ = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) lowercase_ = mask.expand(self.batch_size , -1 ).bool() lowercase_ = model(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # model only returns predictions for masked patches lowercase_ = mask.sum().item() lowercase_ = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def _lowercase ( self : Union[str, Any] ) -> Optional[int]: lowercase_ = self.prepare_config_and_inputs() lowercase_ = config_and_inputs lowercase_ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class lowercase__( UpperCAmelCase , UpperCAmelCase , unittest.TestCase ): """simple docstring""" a :Optional[int] = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) a :Any = ( {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification} if is_torch_available() else {} ) a :int = False a :str = False a :List[str] = False a :List[Any] = False def _lowercase ( self : Tuple ) -> Optional[Any]: lowercase_ = VideoMAEModelTester(self ) lowercase_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , has_text_modality=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 ) def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any]=False ) -> int: lowercase_ = copy.deepcopy(SCREAMING_SNAKE_CASE_ ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch lowercase_ = torch.ones((self.model_tester.num_masks,) ) lowercase_ = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) lowercase_ = mask.expand(self.model_tester.batch_size , -1 ).bool() lowercase_ = bool_masked_pos.to(SCREAMING_SNAKE_CASE_ ) if return_labels: if model_class in [ *get_values(SCREAMING_SNAKE_CASE_ ), ]: lowercase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ ) return inputs_dict def _lowercase ( self : Dict ) -> Tuple: self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' ) def _lowercase ( self : Union[str, Any] ) -> int: pass def _lowercase ( self : List[str] ) -> Dict: lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = 
model_class(SCREAMING_SNAKE_CASE_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase_ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(SCREAMING_SNAKE_CASE_ , nn.Linear ) ) def _lowercase ( self : int ) -> Dict: lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) lowercase_ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ = [*signature.parameters.keys()] lowercase_ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Dict ) -> Tuple: lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : str ) -> List[str]: lowercase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ ) @slow def _lowercase ( self : List[Any] ) -> int: for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ = VideoMAEModel.from_pretrained(SCREAMING_SNAKE_CASE_ ) self.assertIsNotNone(SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : List[Any] ) -> Optional[int]: if not self.has_attentions: pass else: lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ = True for model_class in self.all_model_classes: lowercase_ = self.model_tester.seq_length - self.model_tester.num_masks lowercase_ = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) lowercase_ = True lowercase_ = False lowercase_ = True lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase_ = True lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase_ = len(SCREAMING_SNAKE_CASE_ ) # Check attention is always last and order is fine lowercase_ = True lowercase_ = True lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) self.assertEqual(out_len + 1 , len(SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.attentions self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def _lowercase ( self : List[Any] ) -> Any: def check_hidden_states_output(SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Any ): lowercase_ = model_class(SCREAMING_SNAKE_CASE_ ) model.to(SCREAMING_SNAKE_CASE_ ) model.eval() with torch.no_grad(): 
lowercase_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) lowercase_ = outputs.hidden_states lowercase_ = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) lowercase_ = self.model_tester.seq_length - self.model_tester.num_masks lowercase_ = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ = True check_hidden_states_output(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def _lowercase ( self : int ) -> str: pass def a ( ): '''simple docstring''' lowercase_ = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) lowercase_ = np.load(_lowercase ) return list(_lowercase ) @require_torch @require_vision class lowercase__( unittest.TestCase ): """simple docstring""" @cached_property def _lowercase ( self : str ) -> List[str]: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _lowercase ( self : Optional[Any] ) -> Optional[Any]: lowercase_ = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to( SCREAMING_SNAKE_CASE_ ) lowercase_ = self.default_image_processor lowercase_ = prepare_video() lowercase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): lowercase_ = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits lowercase_ = torch.Size((1, 4_0_0) ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.tensor([0.36_69, -0.06_88, -0.24_21] ).to(SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) @slow def _lowercase ( self : List[Any] ) -> Union[str, Any]: lowercase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(SCREAMING_SNAKE_CASE_ ) lowercase_ = self.default_image_processor lowercase_ = prepare_video() lowercase_ = image_processor(SCREAMING_SNAKE_CASE_ , return_tensors='''pt''' ).to(SCREAMING_SNAKE_CASE_ ) # add boolean mask, indicating which patches to mask lowercase_ = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) lowercase_ = torch.load(SCREAMING_SNAKE_CASE_ ) # forward pass with torch.no_grad(): lowercase_ = model(**SCREAMING_SNAKE_CASE_ ) # verify the logits lowercase_ = torch.Size([1, 1_4_0_8, 1_5_3_6] ) lowercase_ = torch.tensor( [[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] , device=SCREAMING_SNAKE_CASE_ ) self.assertEqual(outputs.logits.shape , SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` 
= `True`) lowercase_ = torch.tensor([0.51_42] , device=SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.loss , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) lowercase_ = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=SCREAMING_SNAKE_CASE_ ).to( SCREAMING_SNAKE_CASE_ ) with torch.no_grad(): lowercase_ = model(**SCREAMING_SNAKE_CASE_ ) lowercase_ = torch.tensor([0.64_69] , device=SCREAMING_SNAKE_CASE_ ) self.assertTrue(torch.allclose(outputs.loss , SCREAMING_SNAKE_CASE_ , atol=1e-4 ) )
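Both the model tester and the pre-training integration test build their boolean masks the same way: a fixed run of ones padded with zeros, then broadcast across the batch so every video masks the same number of patches. A standalone sketch with small stand-in sizes:

```python
# Sketch of the bool_masked_pos construction: num_masks ones, then zeros,
# expanded so each example in the batch shares the same mask.
import torch

seq_length, num_masks, batch_size = 10, 4, 2
mask = torch.cat([torch.ones(num_masks), torch.zeros(seq_length - num_masks)])
bool_masked_pos = mask.expand(batch_size, -1).bool()

print(bool_masked_pos.shape)       # torch.Size([2, 10])
print(int(bool_masked_pos.sum()))  # 8 masked positions across the batch
```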
30
'''simple docstring''' from collections import UserDict from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax a : Union[str, Any] = logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class UpperCamelCase_ ( __magic_name__ ): def __init__( self , **A ) -> List[str]: super().__init__(**A ) requires_backends(self , """vision""" ) self.check_model_type( TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING if self.framework == """tf""" else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING ) def __call__( self , A , **A ) -> Optional[Any]: return super().__call__(A , **A ) def _lowercase( self , **A ) -> Optional[Any]: UpperCAmelCase : List[Any] = {} if "candidate_labels" in kwargs: UpperCAmelCase : Dict = kwargs["""candidate_labels"""] if "hypothesis_template" in kwargs: UpperCAmelCase : Optional[Any] = kwargs["""hypothesis_template"""] return preprocess_params, {}, {} def _lowercase( self , A , A=None , A="This is a photo of {}." ) -> Optional[Any]: UpperCAmelCase : int = load_image(A ) UpperCAmelCase : List[str] = self.image_processor(images=[image] , return_tensors=self.framework ) UpperCAmelCase : List[str] = candidate_labels UpperCAmelCase : Tuple = [hypothesis_template.format(A ) for x in candidate_labels] UpperCAmelCase : Union[str, Any] = self.tokenizer(A , return_tensors=self.framework , padding=A ) UpperCAmelCase : Union[str, Any] = [text_inputs] return inputs def _lowercase( self , A ) -> Optional[int]: UpperCAmelCase : List[Any] = model_inputs.pop("""candidate_labels""" ) UpperCAmelCase : Optional[Any] = model_inputs.pop("""text_inputs""" ) if isinstance(text_inputs[0] , A ): UpperCAmelCase : Optional[Any] = text_inputs[0] else: # Batching case. UpperCAmelCase : Any = text_inputs[0][0] UpperCAmelCase : Dict = self.model(**A , **A ) UpperCAmelCase : List[Any] = { """candidate_labels""": candidate_labels, """logits""": outputs.logits_per_image, } return model_outputs def _lowercase( self , A ) -> Union[str, Any]: UpperCAmelCase : int = model_outputs.pop("""candidate_labels""" ) UpperCAmelCase : int = model_outputs["""logits"""][0] if self.framework == "pt": UpperCAmelCase : Optional[int] = logits.softmax(dim=-1 ).squeeze(-1 ) UpperCAmelCase : Any = probs.tolist() if not isinstance(A , A ): UpperCAmelCase : Any = [scores] elif self.framework == "tf": UpperCAmelCase : List[str] = stable_softmax(A , axis=-1 ) UpperCAmelCase : Union[str, Any] = probs.numpy().tolist() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) UpperCAmelCase : Any = [ {"""score""": score, """label""": candidate_label} for score, candidate_label in sorted(zip(A , A ) , key=lambda A : -x[0] ) ] return result
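The postprocessing step softmaxes `logits_per_image` across the candidate labels to produce one score per label. A rough sketch of that scoring with random stand-in embeddings (a real CLIP-style model also normalizes the embeddings and applies a learned logit scale):

```python
# Hedged sketch of zero-shot image scoring: one image embedding against
# N label embeddings, softmaxed over the label axis.
import torch

image_emb = torch.randn(1, 512)
text_embs = torch.randn(3, 512)              # one row per candidate label
logits_per_image = image_emb @ text_embs.T   # shape (1, 3)
probs = logits_per_image.softmax(dim=-1).squeeze(0)

for label, score in zip(["cat", "dog", "car"], probs.tolist()):
    print(label, round(score, 3))
```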
265
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) __SCREAMING_SNAKE_CASE : int = {"""configuration_mbart""": ["""MBART_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MBartConfig""", """MBartOnnxConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = ["""MBartTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : int = ["""MBartTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[int] = [ """MBART_PRETRAINED_MODEL_ARCHIVE_LIST""", """MBartForCausalLM""", """MBartForConditionalGeneration""", """MBartForQuestionAnswering""", """MBartForSequenceClassification""", """MBartModel""", """MBartPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Tuple = [ """TFMBartForConditionalGeneration""", """TFMBartModel""", """TFMBartPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE : Optional[Any] = [ """FlaxMBartForConditionalGeneration""", """FlaxMBartForQuestionAnswering""", """FlaxMBartForSequenceClassification""", """FlaxMBartModel""", """FlaxMBartPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart import MBartTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mbart_fast import MBartTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mbart import ( MBART_PRETRAINED_MODEL_ARCHIVE_LIST, MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, MBartPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_mbart import ( FlaxMBartForConditionalGeneration, FlaxMBartForQuestionAnswering, FlaxMBartForSequenceClassification, FlaxMBartModel, FlaxMBartPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
347
'''simple docstring''' from operator import delitem, getitem, setitem import pytest from data_structures.hashing.hash_map import HashMap def __lowerCamelCase ( _lowercase ) -> Optional[Any]: return getitem, k def __lowerCamelCase ( _lowercase , _lowercase ) -> List[str]: return setitem, k, v def __lowerCamelCase ( _lowercase ) -> int: return delitem, k def __lowerCamelCase ( _lowercase , _lowercase , *_lowercase ) -> Optional[Any]: try: return fun(_lowercase , *_lowercase ), None except Exception as e: return None, e a : List[str] = ( _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), ) a : List[Any] = [ _set("""key_a""", """val_a"""), _set("""key_a""", """val_b"""), ] a : int = [ _set("""key_a""", """val_a"""), _set("""key_b""", """val_b"""), _del("""key_a"""), _del("""key_b"""), _set("""key_a""", """val_a"""), _del("""key_a"""), ] a : List[Any] = [ _get("""key_a"""), _del("""key_a"""), _set("""key_a""", """val_a"""), _del("""key_a"""), _del("""key_a"""), _get("""key_a"""), ] a : Tuple = [ *[_set(x, x) for x in range(5)], # guaranteed upsize ] a : Optional[Any] = [ *[_set(x, x) for x in range(5)], # guaranteed upsize *[_del(x) for x in range(5)], _set("""key_a""", """val_b"""), ] @pytest.mark.parametrize( """operations""" , ( pytest.param(_add_items , id="""add items""" ), pytest.param(_overwrite_items , id="""overwrite items""" ), pytest.param(_delete_items , id="""delete items""" ), pytest.param(_access_absent_items , id="""access absent items""" ), pytest.param(_add_with_resize_up , id="""add with resize up""" ), pytest.param(_add_with_resize_down , id="""add with resize down""" ), ) , ) def __lowerCamelCase ( _lowercase ) -> Optional[int]: UpperCAmelCase : List[str] = HashMap(initial_block_size=4 ) UpperCAmelCase : Dict = {} for _, (fun, *args) in enumerate(_lowercase ): UpperCAmelCase , UpperCAmelCase : Union[str, Any] = _run_operation(_lowercase , _lowercase , *_lowercase ) UpperCAmelCase , UpperCAmelCase : Any = _run_operation(_lowercase , _lowercase , *_lowercase ) assert my_res == py_res assert str(_lowercase ) == str(_lowercase ) assert set(_lowercase ) == set(_lowercase ) assert len(_lowercase ) == len(_lowercase ) assert set(my.items() ) == set(py.items() ) def __lowerCamelCase ( ) -> List[Any]: def is_public(_lowercase ) -> bool: return not name.startswith("""_""" ) UpperCAmelCase : int = {name for name in dir({} ) if is_public(_lowercase )} UpperCAmelCase : Any = {name for name in dir(HashMap() ) if is_public(_lowercase )} assert dict_public_names > hash_public_names
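The test above is a differential test: one operation log is replayed against both the built-in `dict` and the `HashMap` under test, and the results must match, including which exception type is raised. A condensed sketch of the idea (here both sides are plain dicts, so the assertions trivially hold):

```python
# Condensed sketch of differential testing: replay one operation log against
# a reference and a candidate, comparing results and exception types.
from operator import delitem, getitem, setitem

def run(fun, obj, *args):
    try:
        return fun(obj, *args), None
    except Exception as exc:
        return None, type(exc)

ops = [(setitem, "k", 1), (getitem, "k"), (delitem, "k"), (getitem, "k")]
reference, candidate = {}, {}  # swap `candidate` for the map under test
for fun, *args in ops:
    assert run(fun, reference, *args) == run(fun, candidate, *args)
print("all operations matched")
```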
265
0
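A minimal sketch of the lazy-import pattern the mbart __init__.py record above implements, done here with PEP 562's module-level __getattr__ instead of transformers' internal _LazyModule; the submodule name heavy_module and the class HeavyClass are hypothetical placeholders, not part of the original file.

# Sketch (assumes this lives in a package __init__.py that really has a
# submodule named heavy_module defining HeavyClass -- both hypothetical).
import importlib

_import_structure = {"heavy_module": ["HeavyClass"]}


def __getattr__(name):
    # Import the owning submodule only on first attribute access,
    # so `import mypackage` stays cheap.
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f".{module_name}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")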
import unittest

from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax

from .test_modeling_common_flax import FlaxModelTesterMixin


if is_flax_available():
    import jax


@require_flax
class FlaxAutoencoderKLTests(FlaxModelTesterMixin, unittest.TestCase):
    model_class = FlaxAutoencoderKL

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        prng_key = jax.random.PRNGKey(0)
        image = jax.random.uniform(prng_key, ((batch_size, num_channels) + sizes))

        return {"sample": image, "prng_key": prng_key}

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
232
'''simple docstring''' import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartTokenizer, MBartTokenizerFast, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, ) from ...test_tokenization_common import TokenizerTesterMixin a : Dict = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mbart.modeling_mbart import shift_tokens_right a : List[str] = 2_5_0_0_0_4 a : List[str] = 2_5_0_0_2_0 @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = MBartTokenizer lowercase = MBartTokenizerFast lowercase = True lowercase = True def _lowercase( self ) -> List[Any]: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : str = MBartTokenizer(A , keep_accents=A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self ) -> int: UpperCAmelCase : Optional[Any] = MBartTokenizer(A , keep_accents=A ) UpperCAmelCase : Tuple = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) UpperCAmelCase : Dict = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual( A , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] # ^ unk: 2 + 1 = 3 unk: 2 + 1 = 3 ^ ] , ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) def _lowercase( self ) -> Union[str, Any]: if not self.test_slow_tokenizer: # as we don't have a slow version, we can't compare the outputs between slow and fast versions return UpperCAmelCase : Tuple = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart""", {}) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): UpperCAmelCase : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : str = self.tokenizer_class.from_pretrained(A , **A ) UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Dict = tokenizer_r.save_pretrained(A ) UpperCAmelCase : int = tokenizer_p.save_pretrained(A ) # Checks it save with the same files + the tokenizer.json file for the fast one self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) UpperCAmelCase : int = tuple(f for f in 
tokenizer_r_files if """tokenizer.json""" not in f ) self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[int] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Tuple = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key)) # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id")) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=True UpperCAmelCase : Optional[int] = tempfile.mkdtemp() UpperCAmelCase : Any = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : Optional[int] = tokenizer_p.save_pretrained(A ) # Checks it save with the same files self.assertSequenceEqual(A , A ) # Checks everything loads correctly in the same way UpperCAmelCase : List[str] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : Any = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) # Save tokenizer rust, legacy_format=False UpperCAmelCase : Optional[Any] = tempfile.mkdtemp() UpperCAmelCase : Optional[Any] = tokenizer_r.save_pretrained(A , legacy_format=A ) UpperCAmelCase : List[str] = tokenizer_p.save_pretrained(A ) # Checks it saved the tokenizer.json file self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) ) # Checks everything loads correctly in the same way UpperCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(A ) UpperCAmelCase : str = tokenizer_p.from_pretrained(A ) # Check special tokens are set accordingly on Rust and Python for key in tokenizer_pp.special_tokens_map: self.assertTrue(hasattr(A , A ) ) shutil.rmtree(A ) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( unittest.TestCase ): lowercase = 'facebook/mbart-large-en-ro' lowercase = [ ' UN Chief Says There Is No Military Solution in Syria', ' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.', ] lowercase = [ 'Şeful ONU declară că nu există o soluţie militară în Siria', 'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei' ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor' ' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.', ] lowercase = [8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2, EN_CODE] @classmethod def _lowercase( cls ) -> Tuple: UpperCAmelCase : MBartTokenizer = MBartTokenizer.from_pretrained( cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" ) UpperCAmelCase : int = 1 return cls def _lowercase( self ) -> Union[str, Any]: self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250001 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250004 ) self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250020 ) def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Optional[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , A ) def _lowercase( 
self ) -> List[str]: self.assertIn(A , self.tokenizer.all_special_ids ) UpperCAmelCase : str = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2] UpperCAmelCase : int = self.tokenizer.decode(A , skip_special_tokens=A ) UpperCAmelCase : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A ) self.assertEqual(A , A ) self.assertNotIn(self.tokenizer.eos_token , A ) def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[str] = ["""this is gunna be a long sentence """ * 20] assert isinstance(src_text[0] , A ) UpperCAmelCase : int = 10 UpperCAmelCase : List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0] self.assertEqual(ids[-2] , 2 ) self.assertEqual(ids[-1] , A ) self.assertEqual(len(A ) , A ) def _lowercase( self ) -> Tuple: self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250026, 250001] ) def _lowercase( self ) -> Dict: UpperCAmelCase : Any = tempfile.mkdtemp() UpperCAmelCase : Dict = self.tokenizer.fairseq_tokens_to_ids self.tokenizer.save_pretrained(A ) UpperCAmelCase : Tuple = MBartTokenizer.from_pretrained(A ) self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A ) @require_torch def _lowercase( self ) -> List[str]: UpperCAmelCase : int = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" ) UpperCAmelCase : Union[str, Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 assert batch.input_ids[1][-2:].tolist() == [2, EN_CODE] assert batch.decoder_input_ids[1][0].tolist() == RO_CODE assert batch.decoder_input_ids[1][-1] == 2 assert batch.labels[1][-2:].tolist() == [2, RO_CODE] @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : Union[str, Any] = self.tokenizer( self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , ) UpperCAmelCase : Optional[int] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id ) self.assertIsInstance(A , A ) self.assertEqual((2, 14) , batch.input_ids.shape ) self.assertEqual((2, 14) , batch.attention_mask.shape ) UpperCAmelCase : str = batch.input_ids.tolist()[0] self.assertListEqual(self.expected_src_tokens , A ) self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS # Test that special tokens are reset self.assertEqual(self.tokenizer.prefix_tokens , [] ) self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, EN_CODE] ) def _lowercase( self ) -> List[str]: UpperCAmelCase : Optional[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" ) UpperCAmelCase : Dict = self.tokenizer( text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" ) UpperCAmelCase : Dict = targets["""input_ids"""] UpperCAmelCase : Union[str, Any] = shift_tokens_right(A , self.tokenizer.pad_token_id ) self.assertEqual(batch.input_ids.shape[1] , 3 ) self.assertEqual(batch.decoder_input_ids.shape[1] , 10 ) @require_torch def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[Any] = self.tokenizer._build_translation_inputs( """A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" ) self.assertEqual( nested_simplify(A ) , { # A, test, EOS, en_XX """input_ids""": [[62, 3034, 2, 250004]], """attention_mask""": [[1, 1, 1, 1]], # ar_AR """forced_bos_token_id""": 250001, } , )
265
0
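A hedged usage sketch for the MBartTokenizer behaviour the tests above exercise; it assumes the facebook/mbart-large-en-ro checkpoint can be downloaded and that torch is installed, and it asserts nothing beyond what the record's own tests state.

# Encode an en_XX -> ro_RO translation pair; mirrors the batch-encoding
# behaviour asserted in the tests above (language codes appended after EOS).
from transformers import MBartTokenizer

tokenizer = MBartTokenizer.from_pretrained(
    "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
)
batch = tokenizer(
    ["UN Chief Says There Is No Military Solution in Syria"],
    text_target=["Şeful ONU declară că nu există o soluţie militară în Siria"],
    padding=True,
    return_tensors="pt",
)
# Source ids should end with [eos, en_XX]; labels should end with [eos, ro_RO].
print(batch["input_ids"][0, -2:], batch["labels"][0, -2:])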
import unittest import numpy as np import torch from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad class _lowercase ( unittest.TestCase ): """simple docstring""" def UpperCamelCase_ (self ): """simple docstring""" a = 10 def UpperCamelCase_ (self ): """simple docstring""" a = [1, 2, 3, 4] a = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0] self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] a = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] self.assertEqual(truncate_or_pad(lowerCamelCase_ , self.block_size , 0 ) , lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = """It was the year of Our Lord one thousand seven hundred and seventy-five.\n\nSpiritual revelations were conceded to England at that favoured period, as at this.""" a = process_story(lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , [] ) def UpperCamelCase_ (self ): """simple docstring""" a = """""" a = process_story(lowerCamelCase_ ) self.assertEqual(lowerCamelCase_ , [] ) self.assertEqual(lowerCamelCase_ , [] ) def UpperCamelCase_ (self ): """simple docstring""" a = ( """It was the year of Our Lord one thousand seven hundred and """ """seventy-five\n\nSpiritual revelations were conceded to England """ """at that favoured period, as at this.\n@highlight\n\nIt was the best of times""" ) a = process_story(lowerCamelCase_ ) a = [ """It was the year of Our Lord one thousand seven hundred and seventy-five.""", """Spiritual revelations were conceded to England at that favoured period, as at this.""", ] self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) a = ["""It was the best of times."""] self.assertEqual(lowerCamelCase_ , lowerCamelCase_ ) def UpperCamelCase_ (self ): """simple docstring""" a = torch.tensor([1, 2, 3, 4] ) a = torch.tensor([1, 1, 1, 1] ) np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 0 ).numpy() , expected.numpy() ) def UpperCamelCase_ (self ): """simple docstring""" a = torch.tensor([1, 2, 3, 4, 23, 23, 23] ) a = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 23 ).numpy() , expected.numpy() ) def UpperCamelCase_ (self ): """simple docstring""" a = torch.tensor([8, 2, 3, 4, 1, 1, 1] ) a = torch.tensor([1, 1, 1, 1, 0, 0, 0] ) np.testing.assert_array_equal(build_mask(lowerCamelCase_ , 1 ).numpy() , expected.numpy() ) def UpperCamelCase_ (self ): """simple docstring""" a = 101 a = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] ) a = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] ) a = compute_token_type_ids(lowerCamelCase_ , lowerCamelCase_ ) np.testing.assert_array_equal(lowerCamelCase_ , lowerCamelCase_ )
227
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / """utils""")) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 a : List[str] = get_tests_dir("""fixtures""") class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> int: # A mock response for an HTTP head request to emulate server down UpperCAmelCase : Tuple = mock.Mock() UpperCAmelCase : List[str] = 500 UpperCAmelCase : Any = {} UpperCAmelCase : List[str] = HTTPError UpperCAmelCase : str = {} # Download this model to make sure it's in the cache. UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""" , return_value=A ) as mock_head: UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def _lowercase( self ) -> Any: # This test is for deprecated behavior and can be removed in v5 UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def _lowercase( self ) -> Union[str, Any]: with self.assertRaises(A ): # config is in subfolder, the following should not work without specifying the subfolder UpperCAmelCase : Any = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""" , subfolder="""feature_extractor""" ) self.assertIsNotNone(A ) @is_staging_test class UpperCamelCase_ ( unittest.TestCase ): @classmethod def _lowercase( cls ) -> Dict: UpperCAmelCase : Tuple = TOKEN HfFolder.save_token(A ) @classmethod def _lowercase( cls ) -> List[str]: try: delete_repo(token=cls._token , repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Union[str, Any] = ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Optional[int] = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""test-image-processor""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained(f'''{USER}/test-image-processor''' ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> List[str]: UpperCAmelCase : List[str] = 
ViTImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""valid_org/test-image-processor""" , use_auth_token=self._token ) UpperCAmelCase : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) # Reset repo delete_repo(token=self._token , repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( A , repo_id="""valid_org/test-image-processor-org""" , push_to_hub=A , use_auth_token=self._token ) UpperCAmelCase : int = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(A , getattr(A , A ) ) def _lowercase( self ) -> Optional[int]: CustomImageProcessor.register_for_auto_class() UpperCAmelCase : Optional[Any] = CustomImageProcessor.from_pretrained(A ) image_processor.push_to_hub("""test-dynamic-image-processor""" , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map , {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""} , ) UpperCAmelCase : Union[str, Any] = AutoImageProcessor.from_pretrained( f'''{USER}/test-dynamic-image-processor''' , trust_remote_code=A ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__ , """CustomImageProcessor""" )
265
0
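A reference implementation of truncate_or_pad and build_mask, inferred purely from the assertions in the utils_summarization tests above; the real helpers in transformers may differ in detail.

import torch


def truncate_or_pad(sequence, block_size, pad_token_id):
    # Truncate to block_size, or right-pad with pad_token_id up to block_size.
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


def build_mask(sequence, pad_token_id):
    # 1 for real tokens, 0 wherever the pad token appears.
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0
    return mask


# Spot checks against the expectations in the tests above.
assert truncate_or_pad([1, 2, 3, 4], 10, 0) == [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
assert build_mask(torch.tensor([1, 2, 3, 4, 23, 23, 23]), 23).tolist() == [1, 1, 1, 1, 0, 0, 0]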
import json import os import unittest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_ftfy, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase_ ( snake_case_ , unittest.TestCase ): '''simple docstring''' lowerCAmelCase = CLIPTokenizer lowerCAmelCase = CLIPTokenizerFast lowerCAmelCase = True lowerCAmelCase = {} lowerCAmelCase = False def _UpperCamelCase ( self ) -> Optional[int]: super().setUp() # fmt: off snake_case_ = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on snake_case_ = dict(zip(a , range(len(a ) ) ) ) snake_case_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>"""] snake_case_ = {"""unk_token""": """<unk>"""} snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) snake_case_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp: fp.write(json.dumps(a ) + '\n' ) with open(self.merges_file , 'w' , encoding='utf-8' ) as fp: fp.write('\n'.join(a ) ) def _UpperCamelCase ( self , **a ) -> str: kwargs.update(self.special_tokens_map ) return CLIPTokenizer.from_pretrained(self.tmpdirname , **a ) def _UpperCamelCase ( self , **a ) -> Union[str, Any]: kwargs.update(self.special_tokens_map ) return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **a ) def _UpperCamelCase ( self , a ) -> Optional[int]: snake_case_ = """lower newer""" snake_case_ = """lower newer""" return input_text, output_text def _UpperCamelCase ( self ) -> Any: snake_case_ = CLIPTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map ) snake_case_ = """lower newer""" snake_case_ = ["""lo""", """w""", """er</w>""", """n""", """e""", """w""", """er</w>"""] snake_case_ = tokenizer.tokenize(a ) self.assertListEqual(a , a ) snake_case_ = tokens + [tokenizer.unk_token] snake_case_ = [10, 2, 16, 9, 3, 2, 16, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a ) @require_ftfy def _UpperCamelCase ( self ) -> Dict: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = self.tokenizer_class.from_pretrained(a , **a ) snake_case_ = self.rust_tokenizer_class.from_pretrained(a , **a ) snake_case_ = """A\n'll 11p223RF☆ho!!to?'d'd''d of a cat to-$''d.""" snake_case_ = tokenizer_s.tokenize(a ) snake_case_ = tokenizer_r.tokenize(a ) self.assertListEqual(a , a ) # Test that the tokenization is identical on an example containing a character (Latin Small Letter A # with Tilde) encoded in 2 different ways snake_case_ = """xa\u0303y""" + """ """ + """x\xe3y""" snake_case_ = tokenizer_s.tokenize(a ) snake_case_ = tokenizer_r.tokenize(a ) self.assertListEqual(a , a ) # Test that the tokenization is identical on unicode of space type snake_case_ = [ """\u0009""", # (horizontal tab, '\t') """\u000B""", # (vertical tab) """\u000C""", # (form feed) """\u0020""", # (space, ' ') """\u200E""", # (left-to-right mark):w """\u200F""", # (right-to-left mark) ] for unicode_seq in spaces_unicodes: snake_case_ = tokenizer_s.tokenize(a ) snake_case_ = tokenizer_r.tokenize(a ) 
self.assertListEqual(a , a ) # Test that the tokenization is identical on unicode of line break type snake_case_ = [ """\u000A""", # (line feed, '\n') """\r\n""", # (carriage return and line feed, '\r\n') """\u000D""", # (carriage return, '\r') """\r""", # (carriage return, '\r') """\u000D""", # (carriage return, '\r') """\u2028""", # (line separator) """\u2029""", # (paragraph separator) # "\u0085", # (next line) ] # The tokenization is not identical for the character "\u0085" (next line). The slow version using ftfy transforms # it into the Horizontal Ellipsis character "…" ("\u2026") while the fast version transforms it into a # space (and thus into an empty list). for unicode_seq in line_break_unicodes: snake_case_ = tokenizer_s.tokenize(a ) snake_case_ = tokenizer_r.tokenize(a ) self.assertListEqual(a , a ) def _UpperCamelCase ( self ) -> Union[str, Any]: # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): snake_case_ = """hello""" # `hello` is a token in the vocabulary of `pretrained_name` snake_case_ = F'''{text_of_1_token} {text_of_1_token}''' snake_case_ = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , ) snake_case_ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (0, len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (len(a ) + 1, len(a ) + 1 + len(a )) , ) snake_case_ = F''' {text}''' snake_case_ = self.rust_tokenizer_class.from_pretrained( a , use_fast=a , ) snake_case_ = tokenizer_r(a , return_offsets_mapping=a , add_special_tokens=a ) self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a )) ) self.assertEqual( encoding.offset_mapping[1] , (1 + len(a ) + 1, 1 + len(a ) + 1 + len(a )) , ) def _UpperCamelCase ( self ) -> Optional[Any]: # Test related to the breaking change introduced in transformers v4.17.0 # We need to check that an error in raised when the user try to load a previous version of the tokenizer. with self.assertRaises(a ) as context: self.rust_tokenizer_class.from_pretrained('robot-test/old-clip-tokenizer' ) self.assertTrue( context.exception.args[0].startswith( 'The `backend_tokenizer` provided does not match the expected format.' ) ) @require_ftfy def _UpperCamelCase ( self ) -> Any: super().test_tokenization_python_rust_equals() def _UpperCamelCase ( self ) -> int: # CLIP always lower cases letters pass
178
'''simple docstring'''

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


# Function and variable names are restored from their call sites in main();
# the obfuscation had collapsed them into a single identifier.
def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
265
0
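A self-contained sketch of the tiny BPE fixture the CLIP tokenizer tests above construct in setUp, written to a temp directory and loaded back; it assumes transformers is installed (ftfy is optional and only changes text normalisation).

import json
import os
import tempfile

from transformers import CLIPTokenizer

# Same toy vocabulary and merge rules as the tests above.
vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>",
         "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider",
         "<unk>", "<|startoftext|>", "<|endoftext|>"]
merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>"]

tmpdir = tempfile.mkdtemp()
vocab_file = os.path.join(tmpdir, "vocab.json")
merges_file = os.path.join(tmpdir, "merges.txt")
with open(vocab_file, "w", encoding="utf-8") as fp:
    json.dump(dict(zip(vocab, range(len(vocab)))), fp)
with open(merges_file, "w", encoding="utf-8") as fp:
    fp.write("\n".join(merges))

tokenizer = CLIPTokenizer(vocab_file, merges_file, unk_token="<unk>")
print(tokenizer.tokenize("lower newer"))  # ['lo', 'w', 'er</w>', 'n', 'e', 'w', 'er</w>']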
import contextlib import copy import random from typing import Any, Dict, Iterable, Optional, Union import numpy as np import torch from .utils import deprecate, is_transformers_available if is_transformers_available(): import transformers def lowerCamelCase__ ( snake_case_ : Tuple ) -> Optional[int]: random.seed(_lowercase ) np.random.seed(_lowercase ) torch.manual_seed(_lowercase ) torch.cuda.manual_seed_all(_lowercase ) # ^^ safe to call this function even if cuda is not available class SCREAMING_SNAKE_CASE__ : def __init__(self : List[str] , a__ : str , a__ : str = 0.9_9_9_9 , a__ : Optional[Any] = 0.0 , a__ : Any = 0 , a__ : Union[str, Any] = False , a__ : Optional[int] = 1.0 , a__ : Dict = 2 / 3 , a__ : List[str] = None , a__ : str = None , **a__ : Tuple , ): """simple docstring""" if isinstance(a__ , torch.nn.Module ): __snake_case = ( """Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' , '''1.0.0''' , a__ , standard_warn=a__ , ) __snake_case = parameters.parameters() # set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility __snake_case = True if kwargs.get('''max_value''' , a__ ) is not None: __snake_case = """The `max_value` argument is deprecated. Please use `decay` instead.""" deprecate('''max_value''' , '''1.0.0''' , a__ , standard_warn=a__ ) __snake_case = kwargs["""max_value"""] if kwargs.get('''min_value''' , a__ ) is not None: __snake_case = """The `min_value` argument is deprecated. Please use `min_decay` instead.""" deprecate('''min_value''' , '''1.0.0''' , a__ , standard_warn=a__ ) __snake_case = kwargs["""min_value"""] __snake_case = list(a__ ) __snake_case = [p.clone().detach() for p in parameters] if kwargs.get('''device''' , a__ ) is not None: __snake_case = """The `device` argument is deprecated. 
Please use `to` instead.""" deprecate('''device''' , '''1.0.0''' , a__ , standard_warn=a__ ) self.to(device=kwargs['''device'''] ) __snake_case = None __snake_case = decay __snake_case = min_decay __snake_case = update_after_step __snake_case = use_ema_warmup __snake_case = inv_gamma __snake_case = power __snake_case = 0 __snake_case = None # set in `step()` __snake_case = model_cls __snake_case = model_config @classmethod def a (cls : str , a__ : Union[str, Any] , a__ : Optional[int] ): """simple docstring""" __snake_case = model_cls.load_config(a__ , return_unused_kwargs=a__ ) __snake_case = model_cls.from_pretrained(a__ ) __snake_case = cls(model.parameters() , model_cls=a__ , model_config=model.config ) ema_model.load_state_dict(a__ ) return ema_model def a (self : List[Any] , a__ : str ): """simple docstring""" if self.model_cls is None: raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' ) if self.model_config is None: raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' ) __snake_case = self.model_cls.from_config(self.model_config ) __snake_case = self.state_dict() state_dict.pop('''shadow_params''' , a__ ) model.register_to_config(**a__ ) self.copy_to(model.parameters() ) model.save_pretrained(a__ ) def a (self : int , a__ : int ): """simple docstring""" __snake_case = max(0 , optimization_step - self.update_after_step - 1 ) if step <= 0: return 0.0 if self.use_ema_warmup: __snake_case = 1 - (1 + step / self.inv_gamma) ** -self.power else: __snake_case = (1 + step) / (10 + step) __snake_case = min(a__ , self.decay ) # make sure decay is not smaller than min_decay __snake_case = max(a__ , self.min_decay ) return cur_decay_value @torch.no_grad() def a (self : List[Any] , a__ : List[str] ): """simple docstring""" if isinstance(a__ , torch.nn.Module ): __snake_case = ( """Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. """ """Please pass the parameters of the module instead.""" ) deprecate( '''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' , '''1.0.0''' , a__ , standard_warn=a__ , ) __snake_case = parameters.parameters() __snake_case = list(a__ ) self.optimization_step += 1 # Compute the decay factor for the exponential moving average. 
__snake_case = self.get_decay(self.optimization_step ) __snake_case = decay __snake_case = 1 - decay __snake_case = contextlib.nullcontext if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): import deepspeed for s_param, param in zip(self.shadow_params , a__ ): if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled(): __snake_case = deepspeed.zero.GatheredParameters(a__ , modifier_rank=a__ ) with context_manager(): if param.requires_grad: s_param.sub_(one_minus_decay * (s_param - param) ) else: s_param.copy_(a__ ) def a (self : Optional[int] , a__ : Dict ): """simple docstring""" __snake_case = list(a__ ) for s_param, param in zip(self.shadow_params , a__ ): param.data.copy_(s_param.to(param.device ).data ) def a (self : Optional[Any] , a__ : Any=None , a__ : str=None ): """simple docstring""" __snake_case = [ p.to(device=a__ , dtype=a__ ) if p.is_floating_point() else p.to(device=a__ ) for p in self.shadow_params ] def a (self : int ): """simple docstring""" return { "decay": self.decay, "min_decay": self.min_decay, "optimization_step": self.optimization_step, "update_after_step": self.update_after_step, "use_ema_warmup": self.use_ema_warmup, "inv_gamma": self.inv_gamma, "power": self.power, "shadow_params": self.shadow_params, } def a (self : Any , a__ : Optional[Any] ): """simple docstring""" __snake_case = [param.detach().cpu().clone() for param in parameters] def a (self : Any , a__ : Dict ): """simple docstring""" if self.temp_stored_params is None: raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' ) for c_param, param in zip(self.temp_stored_params , a__ ): param.data.copy_(c_param.data ) # Better memory-wise. __snake_case = None def a (self : List[str] , a__ : Optional[int] ): """simple docstring""" __snake_case = copy.deepcopy(a__ ) __snake_case = state_dict.get('''decay''' , self.decay ) if self.decay < 0.0 or self.decay > 1.0: raise ValueError('''Decay must be between 0 and 1''' ) __snake_case = state_dict.get('''min_decay''' , self.min_decay ) if not isinstance(self.min_decay , a__ ): raise ValueError('''Invalid min_decay''' ) __snake_case = state_dict.get('''optimization_step''' , self.optimization_step ) if not isinstance(self.optimization_step , a__ ): raise ValueError('''Invalid optimization_step''' ) __snake_case = state_dict.get('''update_after_step''' , self.update_after_step ) if not isinstance(self.update_after_step , a__ ): raise ValueError('''Invalid update_after_step''' ) __snake_case = state_dict.get('''use_ema_warmup''' , self.use_ema_warmup ) if not isinstance(self.use_ema_warmup , a__ ): raise ValueError('''Invalid use_ema_warmup''' ) __snake_case = state_dict.get('''inv_gamma''' , self.inv_gamma ) if not isinstance(self.inv_gamma , (float, int) ): raise ValueError('''Invalid inv_gamma''' ) __snake_case = state_dict.get('''power''' , self.power ) if not isinstance(self.power , (float, int) ): raise ValueError('''Invalid power''' ) __snake_case = state_dict.get('''shadow_params''' , a__ ) if shadow_params is not None: __snake_case = shadow_params if not isinstance(self.shadow_params , a__ ): raise ValueError('''shadow_params must be a list''' ) if not all(isinstance(a__ , torch.Tensor ) for p in self.shadow_params ): raise ValueError('''shadow_params must all be Tensors''' )
24
'''simple docstring''' import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging a : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase_ ( __magic_name__ ): def __init__( self , A , A , A , A , A , A , A , ) -> Optional[Any]: super().__init__() self.register_modules( vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , ) def _lowercase( self , A = "auto" ) -> List[Any]: if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(A ) def _lowercase( self ) -> Dict: self.enable_attention_slicing(A ) @torch.no_grad() def __call__( self , A , A = 512 , A = 512 , A = 50 , A = 7.5 , A = None , A = 1 , A = 0.0 , A = None , A = None , A = "pil" , A = True , A = None , A = 1 , A = None , **A , ) -> List[Any]: if isinstance(A , A ): UpperCAmelCase : List[str] = 1 elif isinstance(A , A ): UpperCAmelCase : Dict = len(A ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(A )}.''' ) # get prompt text embeddings UpperCAmelCase : List[str] = self.tokenizer( A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) UpperCAmelCase : List[Any] = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: UpperCAmelCase : int = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) UpperCAmelCase : Tuple = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: UpperCAmelCase : Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = text_embeddings.shape UpperCAmelCase : List[str] = text_embeddings.repeat(1 , A , 1 ) UpperCAmelCase : List[Any] = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
UpperCAmelCase : Optional[int] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: UpperCAmelCase : List[str] if negative_prompt is None: UpperCAmelCase : Any = [""""""] elif type(A ) is not type(A ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !=''' f''' {type(A )}.''' ) elif isinstance(A , A ): UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(A ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: UpperCAmelCase : Any = negative_prompt UpperCAmelCase : Dict = text_input_ids.shape[-1] UpperCAmelCase : List[Any] = self.tokenizer( A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , ) UpperCAmelCase : Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method UpperCAmelCase : int = uncond_embeddings.shape[1] UpperCAmelCase : List[Any] = uncond_embeddings.repeat(A , A , 1 ) UpperCAmelCase : List[str] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase : List[str] = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) UpperCAmelCase : str = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps UpperCAmelCase : Dict = torch.randn( A , generator=A , device="""cpu""" , dtype=A ).to(self.device ) UpperCAmelCase : int = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to( self.device ) else: UpperCAmelCase : int = torch.randn( A , generator=A , device=self.device , dtype=A ) UpperCAmelCase : int = torch.randn(A , generator=A , device=self.device , dtype=A ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) UpperCAmelCase : Optional[Any] = latents_reference.to(self.device ) UpperCAmelCase : Tuple = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images UpperCAmelCase : int = (latents_shape[3] - latents_shape_reference[3]) // 2 UpperCAmelCase : List[str] = (latents_shape[2] - latents_shape_reference[2]) // 2 UpperCAmelCase : Union[str, Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx UpperCAmelCase : Union[str, Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy UpperCAmelCase : Optional[int] = 0 if dx < 0 else dx UpperCAmelCase : List[str] = 0 if dy < 0 else dy UpperCAmelCase : Union[str, Any] = max(-dx , 0 ) UpperCAmelCase : List[Any] = max(-dy , 0 ) # import pdb # pdb.set_trace() UpperCAmelCase : str = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(A ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand UpperCAmelCase : Union[str, Any] = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler UpperCAmelCase : Optional[int] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] UpperCAmelCase : int = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) UpperCAmelCase : Optional[Any] = {} if accepts_eta: UpperCAmelCase : List[str] = eta for i, t in enumerate(self.progress_bar(A ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase : Optional[Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase : str = self.scheduler.scale_model_input(A , A ) # predict the noise residual UpperCAmelCase : Any = self.unet(A , A , encoder_hidden_states=A ).sample # perform guidance if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase : Any = noise_pred.chunk(2 ) UpperCAmelCase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase : Dict = self.scheduler.step(A , A , A , **A ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(A , A , A ) UpperCAmelCase : Union[str, Any] = 1 / 0.1_8_2_1_5 * latents UpperCAmelCase : Tuple = self.vae.decode(A ).sample UpperCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: UpperCAmelCase : int = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to( self.device ) UpperCAmelCase , UpperCAmelCase : int = self.safety_checker( images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: UpperCAmelCase : Any = None if output_type == "pil": UpperCAmelCase : int = self.numpy_to_pil(A ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
265
0
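A toy training-loop sketch for the exponential-moving-average helper defined in the record above; the class is the de-mangled shape of diffusers.training_utils.EMAModel, so the sketch imports that public name, and the linear model and loop are placeholders.

import torch

from diffusers.training_utils import EMAModel

model = torch.nn.Linear(4, 4)
ema = EMAModel(model.parameters(), decay=0.999)
optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

for _ in range(10):
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.step(model.parameters())   # update the shadow parameters after each optimizer step

ema.copy_to(model.parameters())    # swap in the averaged weights for evaluation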
"""simple docstring""" import functools from typing import Any def lowercase ( a__ : str , a__ : Tuple ) -> bool: # Validation if not isinstance(_lowercase , _lowercase ) or len(_lowercase ) == 0: raise ValueError('''the string should be not empty string''' ) if not isinstance(_lowercase , _lowercase ) or not all( isinstance(_lowercase , _lowercase ) and len(_lowercase ) > 0 for item in words ): raise ValueError('''the words should be a list of non-empty strings''' ) # Build trie _UpperCamelCase = {} _UpperCamelCase = """WORD_KEEPER""" for word in words: _UpperCamelCase = trie for c in word: if c not in trie_node: _UpperCamelCase = {} _UpperCamelCase = trie_node[c] _UpperCamelCase = True _UpperCamelCase = len(_lowercase ) # Dynamic programming method @functools.cache def is_breakable(a__ : Union[str, Any] ) -> bool: if index == len_string: return True _UpperCamelCase = trie for i in range(_lowercase , _lowercase ): _UpperCamelCase = trie_node.get(string[i] , _lowercase ) if trie_node is None: return False if trie_node.get(_lowercase , _lowercase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
256
'''simple docstring''' from __future__ import annotations import unittest import numpy as np from transformers import BlipTextConfig from transformers.testing_utils import require_tf, slow from transformers.utils import is_tf_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask if is_tf_available(): import tensorflow as tf from transformers import TFBlipTextModel from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCamelCase_ : def __init__( self , A , A=12 , A=7 , A=True , A=True , A=True , A=99 , A=32 , A=32 , A=2 , A=4 , A=37 , A=0.1 , A=0.1 , A=512 , A=0.0_2 , A=0 , A=None , ) -> Any: UpperCAmelCase : Optional[Any] = parent UpperCAmelCase : str = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : int = use_input_mask UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : Dict = vocab_size UpperCAmelCase : str = hidden_size UpperCAmelCase : List[Any] = projection_dim UpperCAmelCase : Tuple = num_hidden_layers UpperCAmelCase : Dict = num_attention_heads UpperCAmelCase : Optional[Any] = intermediate_size UpperCAmelCase : Any = dropout UpperCAmelCase : List[Any] = attention_dropout UpperCAmelCase : Optional[Any] = max_position_embeddings UpperCAmelCase : Tuple = initializer_range UpperCAmelCase : Optional[Any] = scope UpperCAmelCase : Union[str, Any] = bos_token_id def _lowercase( self ) -> Tuple: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Union[str, Any] = None if self.use_input_mask: UpperCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) if input_mask is not None: UpperCAmelCase : Tuple = input_mask.numpy() UpperCAmelCase , UpperCAmelCase : int = input_mask.shape UpperCAmelCase : Optional[int] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(A ): UpperCAmelCase : Tuple = 1 UpperCAmelCase : Optional[Any] = 0 UpperCAmelCase : int = self.get_config() return config, input_ids, tf.convert_to_tensor(A ) def _lowercase( self ) -> int: return BlipTextConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : int = TFBlipTextModel(config=A ) UpperCAmelCase : Union[str, Any] = model(A , attention_mask=A , training=A ) UpperCAmelCase : int = model(A , training=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Dict = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = config_and_inputs UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = (TFBlipTextModel,) if is_tf_available() else () 
lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> int: UpperCAmelCase : Union[str, Any] = BlipTextModelTester(self ) UpperCAmelCase : List[str] = ConfigTester(self , config_class=A , hidden_size=37 ) def _lowercase( self ) -> Tuple: self.config_tester.run_common_tests() def _lowercase( self ) -> List[Any]: UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*A ) def _lowercase( self ) -> List[str]: pass def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""Blip does not use inputs_embeds""" ) def _lowercase( self ) -> Union[str, Any]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Optional[int]: pass @unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" ) def _lowercase( self ) -> Dict: pass @slow def _lowercase( self ) -> Dict: for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase : Any = TFBlipTextModel.from_pretrained(A ) self.assertIsNotNone(A ) def _lowercase( self , A=True ) -> str: super().test_pt_tf_model_equivalence(allow_missing_keys=A )
265
0
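Two quick checks for the word-break helper at the top of this record, using the name word_break from the de-mangled version above; the inputs are the classic word-break examples.

assert word_break("applepenapple", ["apple", "pen"]) is True
assert word_break("catsandog", ["cats", "dog", "sand", "and", "cat"]) is False
print("word_break checks passed")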
import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) _UpperCAmelCase : Optional[int] ={ """iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""", """iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""", """iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""", """mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""", """mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""", """mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""", """mask_downscaling.0""": """mask_embed.conv1""", """mask_downscaling.1""": """mask_embed.layer_norm1""", """mask_downscaling.3""": """mask_embed.conv2""", """mask_downscaling.4""": """mask_embed.layer_norm2""", """mask_downscaling.6""": """mask_embed.conv3""", """point_embeddings""": """point_embed""", """pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""", """image_encoder""": """vision_encoder""", """neck.0""": """neck.conv1""", """neck.1""": """neck.layer_norm1""", """neck.2""": """neck.conv2""", """neck.3""": """neck.layer_norm2""", """patch_embed.proj""": """patch_embed.projection""", """.norm""": """.layer_norm""", """blocks""": """layers""", } def lowerCAmelCase ( lowerCAmelCase_ )-> Tuple: lowerCAmelCase_ : str = {} state_dict.pop('''pixel_mean''' , _lowercase ) state_dict.pop('''pixel_std''' , _lowercase ) lowerCAmelCase_ : Tuple = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*""" for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: lowerCAmelCase_ : Union[str, Any] = key.replace(_lowercase , _lowercase ) if re.match(_lowercase , _lowercase ): lowerCAmelCase_ : Dict = int(re.match(_lowercase , _lowercase ).group(2 ) ) if layer_nb == 0: lowerCAmelCase_ : Tuple = key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: lowerCAmelCase_ : Union[str, Any] = key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: lowerCAmelCase_ : Tuple = key.replace('''layers.2''' , '''proj_out''' ) lowerCAmelCase_ : List[str] = value lowerCAmelCase_ : List[str] = model_state_dict[ """prompt_encoder.shared_embedding.positional_embedding""" ] return model_state_dict def lowerCAmelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="ybelkada/segment-anything" )-> Optional[Any]: lowerCAmelCase_ : Dict = hf_hub_download(_lowercase , f"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: lowerCAmelCase_ : int = SamConfig() elif "sam_vit_l" in model_name: lowerCAmelCase_ : str = SamVisionConfig( hidden_size=1_024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) lowerCAmelCase_ : Dict = SamConfig( vision_config=_lowercase , ) elif "sam_vit_h" in model_name: lowerCAmelCase_ : str = SamVisionConfig( hidden_size=1_280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) lowerCAmelCase_ : Optional[int] = SamConfig( vision_config=_lowercase , ) lowerCAmelCase_ : Union[str, Any] = torch.load(_lowercase , map_location='''cpu''' ) lowerCAmelCase_ : Union[str, Any] = replace_keys(_lowercase ) lowerCAmelCase_ : Union[str, Any] = SamImageProcessor() lowerCAmelCase_ : Union[str, Any] = SamProcessor(image_processor=_lowercase ) lowerCAmelCase_ : Optional[int] = 
SamModel(_lowercase ) hf_model.load_state_dict(_lowercase ) lowerCAmelCase_ : Dict = hf_model.to('''cuda''' ) lowerCAmelCase_ : Optional[Any] = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png""" lowerCAmelCase_ : List[Any] = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ).convert('''RGB''' ) lowerCAmelCase_ : List[Any] = [[[400, 650]]] lowerCAmelCase_ : Any = [[1]] lowerCAmelCase_ : Optional[Any] = processor(images=np.array(_lowercase ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): lowerCAmelCase_ : Dict = hf_model(**_lowercase ) lowerCAmelCase_ : str = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579890251159668 lowerCAmelCase_ : Any = processor( images=np.array(_lowercase ) , input_points=_lowercase , input_labels=_lowercase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): lowerCAmelCase_ : List[str] = hf_model(**_lowercase ) lowerCAmelCase_ : Optional[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9712603092193604 lowerCAmelCase_ : Optional[int] = ((75, 275, 1_725, 850),) lowerCAmelCase_ : Optional[int] = processor(images=np.array(_lowercase ) , input_boxes=_lowercase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): lowerCAmelCase_ : int = hf_model(**_lowercase ) lowerCAmelCase_ : List[str] = output.iou_scores.squeeze() assert scores[-1].item() == 0.8686015605926514 # Test with 2 points and 1 image. lowerCAmelCase_ : Any = [[[400, 650], [800, 650]]] lowerCAmelCase_ : Optional[int] = [[1, 1]] lowerCAmelCase_ : List[str] = processor( images=np.array(_lowercase ) , input_points=_lowercase , input_labels=_lowercase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): lowerCAmelCase_ : Optional[int] = hf_model(**_lowercase ) lowerCAmelCase_ : Optional[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9936047792434692 if __name__ == "__main__": _UpperCAmelCase : Optional[int] =argparse.ArgumentParser() _UpperCAmelCase : Any =["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""] parser.add_argument( """--model_name""", default="""sam_vit_h_4b8939""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether to push the model and processor to the hub after converting""", ) parser.add_argument( """--model_hub_id""", default="""ybelkada/segment-anything""", choices=choices, type=str, help="""Path to hf config.json of model to convert""", ) _UpperCAmelCase : Dict =parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
262
import contextlib
import importlib
import io
import unittest
import unittest.mock

import transformers

# Try to import everything from transformers to ensure every object can be loaded.
from transformers import *  # noqa F406
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, require_tf, require_torch
from transformers.utils import ContextManagers, find_labels, is_flax_available, is_tf_available, is_torch_available


if is_torch_available():
    from transformers import BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification

if is_tf_available():
    from transformers import TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification

if is_flax_available():
    from transformers import FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification


MODEL_ID = DUMMY_UNKNOWN_IDENTIFIER
# An actual model hosted on huggingface.co

REVISION_ID_DEFAULT = "main"
# Default branch name
REVISION_ID_ONE_SPECIFIC_COMMIT = "f2c752cfc5c0ab6f4bdec59acea69eefbee381c2"
# One particular commit (not the top of `main`)
REVISION_ID_INVALID = "aaaaaaa"
# This commit does not exist, so we should 404.

PINNED_SHA1 = "d9e9f15bc825e4b2c9249e9578f884bbcb5e3684"
# Sha-1 of config.json on the top of `main`, for checking purposes
PINNED_SHA256 = "4b243c475af8d0a7754e87d7d096c92e5199ec2fe168a2ee7998e3b8e9bcb1d3"


@contextlib.contextmanager
def context_en():
    print("Welcome!")
    yield
    print("Bye!")


@contextlib.contextmanager
def context_fr():
    print("Bonjour!")
    yield
    print("Au revoir!")


class TestImportMechanisms(unittest.TestCase):
    def test_module_spec_available(self):
        # If the spec is missing, importlib would not be able to import the module dynamically.
        assert transformers.__spec__ is not None
        assert importlib.util.find_spec("transformers") is not None


class GenericUtilTests(unittest.TestCase):
    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_no_context(self, mock_stdout):
        with ContextManagers([]):
            print("Transformers are awesome!")
        # The print statement adds a new line at the end of the output
        self.assertEqual(mock_stdout.getvalue(), "Transformers are awesome!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_one_context(self, mock_stdout):
        with ContextManagers([context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Welcome!\nTransformers are awesome!\nBye!\n")

    @unittest.mock.patch("sys.stdout", new_callable=io.StringIO)
    def test_context_managers_two_context(self, mock_stdout):
        with ContextManagers([context_fr(), context_en()]):
            print("Transformers are awesome!")
        # The output should be wrapped with an English and French welcome and goodbye
        self.assertEqual(mock_stdout.getvalue(), "Bonjour!\nWelcome!\nTransformers are awesome!\nBye!\nAu revoir!\n")

    @require_torch
    def test_find_labels_pt(self):
        self.assertEqual(find_labels(BertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(BertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(BertForQuestionAnswering), ["start_positions", "end_positions"])

        # find_labels works through inheritance, regardless of the class name
        class DummyModel(BertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_tf
    def test_find_labels_tf(self):
        self.assertEqual(find_labels(TFBertForSequenceClassification), ["labels"])
        self.assertEqual(find_labels(TFBertForPreTraining), ["labels", "next_sentence_label"])
        self.assertEqual(find_labels(TFBertForQuestionAnswering), ["start_positions", "end_positions"])

        class DummyModel(TFBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), ["labels"])

    @require_flax
    def test_find_labels_flax(self):
        # Flax models don't have labels
        self.assertEqual(find_labels(FlaxBertForSequenceClassification), [])
        self.assertEqual(find_labels(FlaxBertForPreTraining), [])
        self.assertEqual(find_labels(FlaxBertForQuestionAnswering), [])

        class DummyModel(FlaxBertForSequenceClassification):
            pass

        self.assertEqual(find_labels(DummyModel), [])
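# For context on why the dummy subclasses above inherit their parents' label
# arguments: a sketch of the mechanism, not necessarily the exact current
# implementation of `find_labels`. The label names are recoverable directly
# from the signature of the model's forward method, which is framework-level
# information rather than anything tied to the class name.

import inspect

from transformers import BertForSequenceClassification

signature = inspect.signature(BertForSequenceClassification.forward)
label_args = [p for p in signature.parameters if "label" in p]
print(label_args)  # the tests above expect ["labels"]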
265
0
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version lowercase_ : Dict = logging.getLogger(__name__) require_version('pytorch_lightning>=1.0.4') lowercase_ : Union[str, Any] = { """base""": AutoModel, """sequence-classification""": AutoModelForSequenceClassification, """question-answering""": AutoModelForQuestionAnswering, """pretraining""": AutoModelForPreTraining, """token-classification""": AutoModelForTokenClassification, """language-modeling""": AutoModelWithLMHead, """summarization""": AutoModelForSeqaSeqLM, """translation""": AutoModelForSeqaSeqLM, } # update this and the import above to support new schedulers from transformers.optimization lowercase_ : Union[str, Any] = { """linear""": get_linear_schedule_with_warmup, """cosine""": get_cosine_schedule_with_warmup, """cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup, """polynomial""": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } lowercase_ : int = sorted(arg_to_scheduler.keys()) lowercase_ : str = """{""" + """, """.join(arg_to_scheduler_choices) + """}""" class __lowerCAmelCase ( pl.LightningModule ): def __init__( self : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any]=None , snake_case__ : List[Any]="base" , snake_case__ : List[Any]=None , snake_case__ : List[str]=None , snake_case__ : List[Any]=None , **snake_case__ : List[Any] , ): """simple docstring""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(snake_case__ ) _UpperCAmelCase = 0 _UpperCAmelCase = Path(self.hparams.output_dir ) _UpperCAmelCase = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: _UpperCAmelCase = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=snake_case__ , **snake_case__ , ) else: _UpperCAmelCase = config _UpperCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(self.hparams , snake_case__ , snake_case__ ): assert hasattr(self.config , snake_case__ ), F"""model config doesn\'t have a `{p}` attribute""" setattr(self.config , snake_case__ , getattr(self.hparams , snake_case__ ) ) if tokenizer is None: _UpperCAmelCase = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=snake_case__ , ) else: _UpperCAmelCase = tokenizer _UpperCAmelCase = MODEL_MODES[mode] if model is None: _UpperCAmelCase = self.model_type.from_pretrained( 
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=snake_case__ , ) else: _UpperCAmelCase = model def UpperCamelCase ( self : Optional[int] , *snake_case__ : Optional[Any] , **snake_case__ : Optional[int] ): """simple docstring""" _UpperCAmelCase = self.model_type.from_pretrained(*snake_case__ , **snake_case__ ) def UpperCamelCase ( self : int ): """simple docstring""" _UpperCAmelCase = arg_to_scheduler[self.hparams.lr_scheduler] _UpperCAmelCase = get_schedule_func( self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() ) _UpperCAmelCase = {"""scheduler""": scheduler, """interval""": """step""", """frequency""": 1} return scheduler def UpperCamelCase ( self : str ): """simple docstring""" _UpperCAmelCase = self.model _UpperCAmelCase = ["""bias""", """LayerNorm.weight"""] _UpperCAmelCase = [ { """params""": [ p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay ) ], # check this named paramters """weight_decay""": self.hparams.weight_decay, }, { """params""": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )], """weight_decay""": 0.0, }, ] if self.hparams.adafactor: _UpperCAmelCase = Adafactor( snake_case__ , lr=self.hparams.learning_rate , scale_parameter=snake_case__ , relative_step=snake_case__ ) else: _UpperCAmelCase = AdamW( snake_case__ , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon ) _UpperCAmelCase = optimizer _UpperCAmelCase = self.get_lr_scheduler() return [optimizer], [scheduler] def UpperCamelCase ( self : int , snake_case__ : Any , snake_case__ : Union[str, Any] ): """simple docstring""" return self.validation_step(snake_case__ , snake_case__ ) def UpperCamelCase ( self : Optional[Any] , snake_case__ : Dict ): """simple docstring""" return self.validation_end(snake_case__ ) def UpperCamelCase ( self : Union[str, Any] ): """simple docstring""" _UpperCAmelCase = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores _UpperCAmelCase = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def UpperCamelCase ( self : Dict , snake_case__ : str ): """simple docstring""" if stage == "test": _UpperCAmelCase = len(self.test_dataloader().dataset ) else: _UpperCAmelCase = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=snake_case__ ) _UpperCAmelCase = len(self.train_dataloader().dataset ) def UpperCamelCase ( self : Any , snake_case__ : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Tuple = False ): """simple docstring""" raise NotImplementedError("You must implement this for your task" ) def UpperCamelCase ( self : Optional[int] ): """simple docstring""" return self.train_loader def UpperCamelCase ( self : Dict ): """simple docstring""" return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=snake_case__ ) def UpperCamelCase ( self : List[str] ): """simple docstring""" return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=snake_case__ ) def UpperCamelCase ( self : Dict , snake_case__ : Union[str, Any] ): """simple docstring""" return os.path.join( self.hparams.data_dir , "cached_{}_{}_{}".format( snake_case__ , list(filter(snake_case__ , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , ) @pl.utilities.rank_zero_only def UpperCamelCase ( self : Tuple , snake_case__ : Tuple ): """simple 
docstring""" _UpperCAmelCase = self.output_dir.joinpath("best_tfmr" ) _UpperCAmelCase = self.step_count self.model.save_pretrained(snake_case__ ) self.tokenizer.save_pretrained(snake_case__ ) @staticmethod def UpperCamelCase ( snake_case__ : List[Any] , snake_case__ : List[Any] ): """simple docstring""" parser.add_argument( "--model_name_or_path" , default=snake_case__ , type=snake_case__ , required=snake_case__ , help="Path to pretrained model or model identifier from huggingface.co/models" , ) parser.add_argument( "--config_name" , default="" , type=snake_case__ , help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name" , default=snake_case__ , type=snake_case__ , help="Pretrained tokenizer name or path if not the same as model_name" , ) parser.add_argument( "--cache_dir" , default=str(Path(snake_case__ ).parent / "test_run" / "cache" ) , type=snake_case__ , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , ) parser.add_argument( "--encoder_layerdrop" , type=snake_case__ , help="Encoder layer dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--decoder_layerdrop" , type=snake_case__ , help="Decoder layer dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--dropout" , type=snake_case__ , help="Dropout probability (Optional). Goes into model.config" , ) parser.add_argument( "--attention_dropout" , type=snake_case__ , help="Attention dropout probability (Optional). Goes into model.config" , ) parser.add_argument("--learning_rate" , default=5e-5 , type=snake_case__ , help="The initial learning rate for Adam." ) parser.add_argument( "--lr_scheduler" , default="linear" , choices=snake_case__ , metavar=snake_case__ , type=snake_case__ , help="Learning rate scheduler" , ) parser.add_argument("--weight_decay" , default=0.0 , type=snake_case__ , help="Weight decay if we apply some." ) parser.add_argument("--adam_epsilon" , default=1e-8 , type=snake_case__ , help="Epsilon for Adam optimizer." ) parser.add_argument("--warmup_steps" , default=0 , type=snake_case__ , help="Linear warmup over warmup_steps." ) parser.add_argument("--num_workers" , default=4 , type=snake_case__ , help="kwarg passed to DataLoader" ) parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=snake_case__ ) parser.add_argument("--train_batch_size" , default=32 , type=snake_case__ ) parser.add_argument("--eval_batch_size" , default=32 , type=snake_case__ ) parser.add_argument("--adafactor" , action="store_true" ) class __lowerCAmelCase ( pl.Callback ): def UpperCamelCase ( self : List[str] , snake_case__ : Any , snake_case__ : Tuple ): """simple docstring""" if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. 
class __lowerCAmelCase ( pl.Callback ): def UpperCamelCase ( self : int , snake_case__ : List[Any] , snake_case__ : Union[str, Any] ): """simple docstring""" for name, param in pl_module.model.rag.named_parameters(): if param.grad is None: print(snake_case__ ) class __lowerCAmelCase ( pl.Callback ): def UpperCamelCase ( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int] ): """simple docstring""" _UpperCAmelCase = trainer.lr_schedulers[0]["""scheduler"""] _UpperCAmelCase = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )} pl_module.logger.log_metrics(snake_case__ ) def UpperCamelCase ( self : Dict , snake_case__ : List[str] , snake_case__ : Any ): """simple docstring""" rank_zero_info("***** Validation results *****" ) _UpperCAmelCase = trainer.callback_metrics # Log results for key in sorted(snake_case__ ): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(snake_case__ , str(metrics[key] ) ) ) def UpperCamelCase ( self : Any , snake_case__ : Tuple , snake_case__ : Optional[int] ): """simple docstring""" rank_zero_info("***** Test results *****" ) _UpperCAmelCase = trainer.callback_metrics # Log and save results to file _UpperCAmelCase = os.path.join(pl_module.hparams.output_dir , "test_results.txt" ) with open(snake_case__ , "w" ) as writer: for key in sorted(snake_case__ ): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(snake_case__ , str(metrics[key] ) ) ) writer.write("{} = {}\n".format(snake_case__ , str(metrics[key] ) ) ) def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ ): '''simple docstring''' parser.add_argument( "--output_dir" , default=str(Path(_lowercase ).parent / "test_run" / "model_checkpoints" ) , type=_lowercase , help="The output directory where the model predictions and checkpoints will be written." , ) parser.add_argument( "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , ) parser.add_argument( "--fp16_opt_level" , type=_lowercase , default="O2" , help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ) , ) parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=_lowercase ) parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=_lowercase , help="Max gradient norm" ) parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." ) parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." ) parser.add_argument( "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=_lowercase , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , ) parser.add_argument("--seed" , type=_lowercase , default=42 , help="random seed for initialization" ) parser.add_argument( "--data_dir" , default=str(Path(_lowercase ).parent / "test_run" / "dummy-train-data" ) , type=_lowercase , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." 
, ) def __SCREAMING_SNAKE_CASE ( snake_case_ , snake_case_ , snake_case_=None , snake_case_=True , snake_case_=[] , snake_case_=None , snake_case_=None , **snake_case_ , ): '''simple docstring''' pl.seed_everything(args.seed ) # init model _UpperCAmelCase = Path(model.hparams.output_dir ) odir.mkdir(exist_ok=_lowercase ) # add custom checkpoints if checkpoint_callback is None: _UpperCAmelCase = pl.callbacks.ModelCheckpoint( filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(_lowercase ) if logging_callback is None: _UpperCAmelCase = LoggingCallback() _UpperCAmelCase = {} if args.fpaa: _UpperCAmelCase = 16 if args.gpus > 1: _UpperCAmelCase = """auto""" _UpperCAmelCase = """ddp""" _UpperCAmelCase = args.accumulate_grad_batches _UpperCAmelCase = None _UpperCAmelCase = """auto""" _UpperCAmelCase = pl.Trainer.from_argparse_args( _lowercase , weights_summary=_lowercase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=_lowercase , val_check_interval=1 , num_sanity_val_steps=2 , **_lowercase , ) if args.do_train: trainer.fit(_lowercase ) else: print("RAG modeling tests with new set functions successfuly executed!" ) return trainer
133
from itertools import count


def solution(min_block_length: int = 50) -> int:
    """Return the least row length n for which the number of ways the row can
    be filled (the fill-count function) first exceeds one million."""
    fill_count_functions = [1] * min_block_length
    for n in count(min_block_length):
        fill_count_functions.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                fill_count_functions[n] += fill_count_functions[
                    n - block_start - block_length - 1
                ]
            fill_count_functions[n] += 1
        if fill_count_functions[n] > 1_000_000:
            break
    return n


if __name__ == "__main__":
    print(f"{solution() = }")
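# A quick sanity probe of the recurrence above, under the assumption that the
# list built in solution() is the Project Euler 115 fill-count function F(m, n)
# (red blocks of length >= m separated by at least one black cell). The helper
# below is a hypothetical extraction of those same loops; F(3, 7) = 17 is the
# worked example quoted in Project Euler problems 114/115.

def fill_count(min_block_length: int, row_length: int) -> int:
    f = [1] * min_block_length
    for n in range(min_block_length, row_length + 1):
        f.append(1)
        for block_length in range(min_block_length, n + 1):
            for block_start in range(n - block_length):
                f[n] += f[n - block_start - block_length - 1]
            f[n] += 1
    return f[row_length]


print(fill_count(3, 7))  # 17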
265
0
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        # Passing the batch size explicitly is a user error the decorator guards against.
        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
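# The tests above pin down the decorator's contract: it injects `batch_size`
# as the first argument, starts from `starting_batch_size`, and halves it
# after every out-of-memory failure until the body completes (raising once it
# reaches zero). A minimal usage sketch, with the training body left as a
# placeholder:

from accelerate.utils.memory import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # `batch_size` is supplied by the decorator, not the caller; it is halved
    # (128 -> 64 -> ...) each time this body raises a CUDA OOM error.
    print(f"Trying batch size {batch_size}")
    # ... build the dataloader and run the training loop here ...


train()  # note: called with no arguments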
234
from __future__ import annotations

import math


class SegmentTree:
    """Max segment tree with lazy propagation for range-assignment updates."""

    def __init__(self, size: int) -> None:
        self.size = size
        # approximate the overall size of segment tree with given value
        self.segment_tree = [0 for i in range(0, 4 * size)]
        # create array to store lazy update
        self.lazy = [0 for i in range(0, 4 * size)]
        self.flag = [0 for i in range(0, 4 * size)]  # flag for lazy update

    def left(self, idx: int) -> int:
        return idx * 2

    def right(self, idx: int) -> int:
        return idx * 2 + 1

    def build(self, idx: int, left_element: int, right_element: int, a: list[int]) -> None:
        if left_element == right_element:
            self.segment_tree[idx] = a[left_element - 1]
        else:
            mid = (left_element + right_element) // 2
            self.build(self.left(idx), left_element, mid, a)
            self.build(self.right(idx), mid + 1, right_element, a)
            self.segment_tree[idx] = max(
                self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
            )

    def update(self, idx: int, left_element: int, right_element: int, a: int, b: int, val: int) -> bool:
        """Assign `val` to every position in [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return True
        if left_element >= a and right_element <= b:
            self.segment_tree[idx] = val
            if left_element != right_element:
                self.lazy[self.left(idx)] = val
                self.lazy[self.right(idx)] = val
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
            return True
        mid = (left_element + right_element) // 2
        self.update(self.left(idx), left_element, mid, a, b, val)
        self.update(self.right(idx), mid + 1, right_element, a, b, val)
        self.segment_tree[idx] = max(
            self.segment_tree[self.left(idx)], self.segment_tree[self.right(idx)]
        )
        return True

    def query(self, idx: int, left_element: int, right_element: int, a: int, b: int) -> int | float:
        """Return the maximum over positions [a, b]."""
        if self.flag[idx] is True:
            self.segment_tree[idx] = self.lazy[idx]
            self.flag[idx] = False
            if left_element != right_element:
                self.lazy[self.left(idx)] = self.lazy[idx]
                self.lazy[self.right(idx)] = self.lazy[idx]
                self.flag[self.left(idx)] = True
                self.flag[self.right(idx)] = True
        if right_element < a or left_element > b:
            return -math.inf
        if left_element >= a and right_element <= b:
            return self.segment_tree[idx]
        mid = (left_element + right_element) // 2
        q1 = self.query(self.left(idx), left_element, mid, a, b)
        q2 = self.query(self.right(idx), mid + 1, right_element, a, b)
        return max(q1, q2)

    def __str__(self) -> str:
        return str([self.query(1, 1, self.size, i, i) for i in range(1, self.size + 1)])


if __name__ == "__main__":
    A = [1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8]
    size = 15
    segt = SegmentTree(size)
    segt.build(1, 1, size, A)
    print(segt.query(1, 1, size, 4, 6))
    print(segt.query(1, 1, size, 7, 11))
    print(segt.query(1, 1, size, 7, 12))
    segt.update(1, 1, size, 1, 3, 111)
    print(segt.query(1, 1, size, 1, 15))
    segt.update(1, 1, size, 7, 8, 235)
    print(segt)
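# Design note: because pending range assignments are parked in `lazy`/`flag`
# and only pushed one level down when a node is next visited, both update()
# and query() touch O(log n) nodes per call instead of rewriting every element
# in the range. A small illustrative use, assuming the SegmentTree class above
# is in scope:

values = [5, 2, 8, 1]
tree = SegmentTree(len(values))
tree.build(1, 1, len(values), values)
print(tree.query(1, 1, len(values), 2, 4))  # max over positions 2..4 -> 8
tree.update(1, 1, len(values), 1, 2, 9)     # assign 9 to positions 1..2
print(tree.query(1, 1, len(values), 1, 4))  # -> 9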
265
0
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class lowerCamelCase (A__ ,unittest.TestCase ): lowerCamelCase__ : str = DiTPipeline lowerCamelCase__ : Any = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS lowerCamelCase__ : str = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } lowerCamelCase__ : int = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS lowerCamelCase__ : Tuple = False def SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]: torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = TransformeraDModel( sample_size=1_6 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=__UpperCAmelCase , activation_fn="""gelu-approximate""" , num_embeds_ada_norm=1_0_0_0 , norm_type="""ada_norm_zero""" , norm_elementwise_affine=__UpperCAmelCase , ) SCREAMING_SNAKE_CASE__ = AutoencoderKL() SCREAMING_SNAKE_CASE__ = DDIMScheduler() SCREAMING_SNAKE_CASE__ = {"""transformer""": transformer.eval(), """vae""": vae.eval(), """scheduler""": scheduler} return components def SCREAMING_SNAKE_CASE ( self : Dict , __UpperCAmelCase : int , __UpperCAmelCase : List[Any]=0 ) -> str: if str(__UpperCAmelCase ).startswith("""mps""" ): SCREAMING_SNAKE_CASE__ = torch.manual_seed(__UpperCAmelCase ) else: SCREAMING_SNAKE_CASE__ = torch.Generator(device=__UpperCAmelCase ).manual_seed(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = { """class_labels""": [1], """generator""": generator, """num_inference_steps""": 2, """output_type""": """numpy""", } return inputs def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: SCREAMING_SNAKE_CASE__ = """cpu""" SCREAMING_SNAKE_CASE__ = self.get_dummy_components() SCREAMING_SNAKE_CASE__ = self.pipeline_class(**__UpperCAmelCase ) pipe.to(__UpperCAmelCase ) pipe.set_progress_bar_config(disable=__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = pipe(**__UpperCAmelCase ).images SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 1_6, 1_6, 3) ) SCREAMING_SNAKE_CASE__ = np.array([0.2_946, 0.6_601, 0.4_329, 0.3_296, 0.4_144, 0.5_319, 0.7_273, 0.5_013, 0.4_457] ) SCREAMING_SNAKE_CASE__ = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(__UpperCAmelCase , 1e-3 ) def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]: self._test_inference_batch_single_identical(relax_max_difference=__UpperCAmelCase , expected_max_diff=1e-3 ) @unittest.skipIf( torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @require_torch_gpu @slow class lowerCamelCase (unittest.TestCase ): def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def 
SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]: SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-256""" ) pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE__ = ["""vase""", """umbrella""", """white shark""", """white wolf"""] SCREAMING_SNAKE_CASE__ = pipe.get_label_ids(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=4_0 , output_type="""np""" ).images for word, image in zip(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = load_numpy( F"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-2 def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple: SCREAMING_SNAKE_CASE__ = DiTPipeline.from_pretrained("""facebook/DiT-XL-2-512""" ) SCREAMING_SNAKE_CASE__ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to("""cuda""" ) SCREAMING_SNAKE_CASE__ = ["""vase""", """umbrella"""] SCREAMING_SNAKE_CASE__ = pipe.get_label_ids(__UpperCAmelCase ) SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ = pipe(__UpperCAmelCase , generator=__UpperCAmelCase , num_inference_steps=2_5 , output_type="""np""" ).images for word, image in zip(__UpperCAmelCase , __UpperCAmelCase ): SCREAMING_SNAKE_CASE__ = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" F"""/dit/{word}_512.npy""" ) assert np.abs((expected_image - image).max() ) < 1e-1
165
from PIL import Image


def change_brightness(img: Image, level: float) -> Image:
    """Change the brightness of a PIL image by an additive level."""

    def brightness(c: int) -> float:
        # Fundamental transformation applied to every channel value;
        # note that 128 + level + (c - 128) simplifies to c + level.
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
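# Since the transform above is a plain additive shift, Pillow's own
# enhancement API offers a related (multiplicative, so not bit-identical)
# alternative; a sketch reusing this file's example path:

from PIL import Image, ImageEnhance

with Image.open("image_data/lena.jpg") as img:
    # factor > 1 brightens, factor < 1 darkens; 1.0 is the identity
    brighter = ImageEnhance.Brightness(img).enhance(1.4)
    brighter.save("image_data/lena_brightness_enhance.png", format="png")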
265
0
import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __a = logging.get_logger(__name__) def a ( snake_case__: List[Any] ): '''simple docstring''' lowercase_ = OrderedDict() for key, value in state_dict.items(): if key.startswith('''module.encoder''' ): lowercase_ = key.replace('''module.encoder''' , '''glpn.encoder''' ) if key.startswith('''module.decoder''' ): lowercase_ = key.replace('''module.decoder''' , '''decoder.stages''' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowercase_ = key[key.find('''patch_embed''' ) + len('''patch_embed''' )] lowercase_ = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(_lowercase )-1}''' ) if "norm" in key: lowercase_ = key.replace('''norm''' , '''layer_norm''' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowercase_ = key[key.find('''glpn.encoder.layer_norm''' ) + len('''glpn.encoder.layer_norm''' )] lowercase_ = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(_lowercase )-1}''' ) if "layer_norm1" in key: lowercase_ = key.replace('''layer_norm1''' , '''layer_norm_1''' ) if "layer_norm2" in key: lowercase_ = key.replace('''layer_norm2''' , '''layer_norm_2''' ) if "block" in key: # replace for example block1 by block.0 lowercase_ = key[key.find('''block''' ) + len('''block''' )] lowercase_ = key.replace(F'''block{idx}''' , F'''block.{int(_lowercase )-1}''' ) if "attn.q" in key: lowercase_ = key.replace('''attn.q''' , '''attention.self.query''' ) if "attn.proj" in key: lowercase_ = key.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in key: lowercase_ = key.replace('''attn''' , '''attention.self''' ) if "fc1" in key: lowercase_ = key.replace('''fc1''' , '''dense1''' ) if "fc2" in key: lowercase_ = key.replace('''fc2''' , '''dense2''' ) if "linear_pred" in key: lowercase_ = key.replace('''linear_pred''' , '''classifier''' ) if "linear_fuse" in key: lowercase_ = key.replace('''linear_fuse.conv''' , '''linear_fuse''' ) lowercase_ = key.replace('''linear_fuse.bn''' , '''batch_norm''' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowercase_ = key[key.find('''linear_c''' ) + len('''linear_c''' )] lowercase_ = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(_lowercase )-1}''' ) if "bot_conv" in key: lowercase_ = key.replace('''bot_conv''' , '''0.convolution''' ) if "skip_conv1" in key: lowercase_ = key.replace('''skip_conv1''' , '''1.convolution''' ) if "skip_conv2" in key: lowercase_ = key.replace('''skip_conv2''' , '''2.convolution''' ) if "fusion1" in key: lowercase_ = key.replace('''fusion1''' , '''1.fusion''' ) if "fusion2" in key: lowercase_ = key.replace('''fusion2''' , '''2.fusion''' ) if "fusion3" in key: lowercase_ = key.replace('''fusion3''' , '''3.fusion''' ) if "fusion" in key and "conv" in key: lowercase_ = key.replace('''conv''' , '''convolutional_layer''' ) if key.startswith('''module.last_layer_depth''' ): lowercase_ = key.replace('''module.last_layer_depth''' , '''head.head''' ) lowercase_ = value return new_state_dict def a ( snake_case__: Optional[int] , snake_case__: Tuple ): '''simple docstring''' # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single 
matrix in the original implementation) lowercase_ = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowercase_ = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowercase_ = kv_weight[ : config.hidden_sizes[i], : ] lowercase_ = kv_bias[: config.hidden_sizes[i]] lowercase_ = kv_weight[ config.hidden_sizes[i] :, : ] lowercase_ = kv_bias[config.hidden_sizes[i] :] def a ( ): '''simple docstring''' lowercase_ = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowercase_ = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return image @torch.no_grad() def a ( snake_case__: str , snake_case__: Optional[int] , snake_case__: Union[str, Any]=False , snake_case__: Optional[Any]=None ): '''simple docstring''' lowercase_ = GLPNConfig(hidden_sizes=[64, 128, 320, 512] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowercase_ = GLPNImageProcessor() # prepare image lowercase_ = prepare_img() lowercase_ = image_processor(images=_lowercase , return_tensors='''pt''' ).pixel_values logger.info('''Converting model...''' ) # load original state dict lowercase_ = torch.load(_lowercase , map_location=torch.device('''cpu''' ) ) # rename keys lowercase_ = rename_keys(_lowercase ) # key and value matrices need special treatment read_in_k_v(_lowercase , _lowercase ) # create HuggingFace model and load state dict lowercase_ = GLPNForDepthEstimation(_lowercase ) model.load_state_dict(_lowercase ) model.eval() # forward pass lowercase_ = model(_lowercase ) lowercase_ = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowercase_ = torch.tensor( [[4.4_1_4_7, 4.0_8_7_3, 4.0_6_7_3], [3.7_8_9_0, 3.2_8_8_1, 3.1_5_2_5], [3.7_6_7_4, 3.5_4_2_3, 3.4_9_1_3]] ) elif "kitti" in model_name: lowercase_ = torch.tensor( [[3.4_2_9_1, 2.7_8_6_5, 2.5_1_5_1], [3.2_8_4_1, 2.7_0_2_1, 2.3_5_0_2], [3.1_1_4_7, 2.4_6_2_5, 2.2_4_8_1]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) lowercase_ = torch.Size([1, 480, 640] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , _lowercase , atol=1e-4 ) print('''Looks ok!''' ) # finally, push to hub if required if push_to_hub: logger.info('''Pushing model and image processor to the hub...''' ) model.push_to_hub( repo_path_or_name=Path(_lowercase , _lowercase ) , organization='''nielsr''' , commit_message='''Add model''' , use_temp_dir=_lowercase , ) image_processor.push_to_hub( repo_path_or_name=Path(_lowercase , _lowercase ) , organization='''nielsr''' , commit_message='''Add image processor''' , use_temp_dir=_lowercase , ) if __name__ == "__main__": __a = argparse.ArgumentParser() parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.' ) parser.add_argument( '--model_name', default='glpn-kitti', type=str, help='Name of the model in case you\'re pushing to the hub.', ) __a = parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
30
'''simple docstring''' import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class UpperCamelCase_ : def __init__( self , A , A=13 , A=7 , A=True , A=True , A=True , A=True , A=99 , A=64 , A=5 , A=4 , A=37 , A="gelu" , A=0.1 , A=0.1 , A=512 , A=16 , A=2 , A=0.0_2 , A=3 , A=4 , A=None , ) -> Optional[int]: UpperCAmelCase : List[Any] = parent UpperCAmelCase : Optional[int] = batch_size UpperCAmelCase : Union[str, Any] = seq_length UpperCAmelCase : Optional[Any] = is_training UpperCAmelCase : Dict = use_input_mask UpperCAmelCase : str = use_token_type_ids UpperCAmelCase : List[Any] = use_labels UpperCAmelCase : List[Any] = vocab_size UpperCAmelCase : Dict = hidden_size UpperCAmelCase : Dict = num_hidden_layers UpperCAmelCase : Optional[int] = num_attention_heads UpperCAmelCase : int = intermediate_size UpperCAmelCase : List[str] = hidden_act UpperCAmelCase : List[str] = hidden_dropout_prob UpperCAmelCase : int = attention_probs_dropout_prob UpperCAmelCase : str = max_position_embeddings UpperCAmelCase : Optional[Any] = type_vocab_size UpperCAmelCase : List[str] = type_sequence_label_size UpperCAmelCase : int = initializer_range UpperCAmelCase : str = num_labels UpperCAmelCase : Optional[int] = num_choices UpperCAmelCase : Dict = scope UpperCAmelCase : Union[str, Any] = vocab_size - 1 def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase : Any = None if self.use_input_mask: UpperCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase : List[str] = None if self.use_labels: UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase : Optional[int] = self.get_config() return config, input_ids, input_mask, token_labels def _lowercase( self ) -> Optional[Any]: return GPTNeoXConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.prepare_config_and_inputs() UpperCAmelCase : Any = True return config, input_ids, input_mask, token_labels def _lowercase( self , A , A , A ) -> int: UpperCAmelCase : str = GPTNeoXModel(config=A ) model.to(A ) model.eval() UpperCAmelCase : List[str] = model(A , attention_mask=A ) UpperCAmelCase : List[str] = model(A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 
self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A ) -> Optional[int]: UpperCAmelCase : str = True UpperCAmelCase : Optional[Any] = GPTNeoXModel(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = model(A , attention_mask=A ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowercase( self , A , A , A , A ) -> List[str]: UpperCAmelCase : Tuple = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowercase( self , A , A , A , A ) -> Tuple: UpperCAmelCase : List[str] = self.num_labels UpperCAmelCase : Any = GPTNeoXForQuestionAnswering(A ) model.to(A ) model.eval() UpperCAmelCase : str = model(A , attention_mask=A ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def _lowercase( self , A , A , A , A ) -> int: UpperCAmelCase : Tuple = self.num_labels UpperCAmelCase : List[str] = GPTNeoXForSequenceClassification(A ) model.to(A ) model.eval() UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase : Tuple = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase( self , A , A , A , A ) -> str: UpperCAmelCase : List[Any] = self.num_labels UpperCAmelCase : Tuple = GPTNeoXForTokenClassification(A ) model.to(A ) model.eval() UpperCAmelCase : int = model(A , attention_mask=A , labels=A ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def _lowercase( self , A , A , A ) -> Union[str, Any]: UpperCAmelCase : Optional[int] = True UpperCAmelCase : str = GPTNeoXForCausalLM(config=A ) model.to(A ) model.eval() # first forward pass UpperCAmelCase : List[str] = model(A , attention_mask=A , use_cache=A ) UpperCAmelCase : List[Any] = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase : Optional[int] = ids_tensor((self.batch_size, 3) , config.vocab_size ) UpperCAmelCase : Any = ids_tensor((self.batch_size, 3) , vocab_size=2 ) # append to next input_ids and UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase : Any = torch.cat([input_mask, next_mask] , dim=-1 ) UpperCAmelCase : Dict = model(A , attention_mask=A , output_hidden_states=A ) UpperCAmelCase : Any = output_from_no_past["""hidden_states"""][0] UpperCAmelCase : List[str] = model( A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0] # select random slice UpperCAmelCase : Tuple = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase : List[str] = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(A , A , atol=1e-3 ) ) def _lowercase( self ) -> int: UpperCAmelCase : Tuple = self.prepare_config_and_inputs() UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = config_and_inputs UpperCAmelCase : Union[str, Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask} 
return config, inputs_dict @require_torch class UpperCamelCase_ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): lowercase = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) lowercase = (GPTNeoXForCausalLM,) if is_torch_available() else () lowercase = ( { 'feature-extraction': GPTNeoXModel, 'question-answering': GPTNeoXForQuestionAnswering, 'text-classification': GPTNeoXForSequenceClassification, 'text-generation': GPTNeoXForCausalLM, 'token-classification': GPTNeoXForTokenClassification, 'zero-shot': GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _lowercase( self ) -> Union[str, Any]: UpperCAmelCase : str = GPTNeoXModelTester(self ) UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 ) def _lowercase( self ) -> Optional[Any]: self.config_tester.run_common_tests() def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> Optional[Any]: # This regression test was failing with PyTorch < 1.3 UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(A , A , A ) def _lowercase( self ) -> str: UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A ) def _lowercase( self ) -> int: UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*A ) def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*A ) def _lowercase( self ) -> Any: UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*A ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*A ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def _lowercase( self ) -> Optional[int]: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def _lowercase( self , A ) -> str: UpperCAmelCase , UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase : int = ids_tensor([1, 10] , config.vocab_size ) UpperCAmelCase : Optional[Any] = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Dict = GPTNeoXModel(A ) original_model.to(A ) original_model.eval() UpperCAmelCase : List[str] = original_model(A ).last_hidden_state UpperCAmelCase : Any = original_model(A 
).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase : Any = {"""type""": scaling_type, """factor""": 1_0.0} UpperCAmelCase : str = GPTNeoXModel(A ) scaled_model.to(A ) scaled_model.eval() UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state UpperCAmelCase : Optional[Any] = scaled_model(A ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(A , A , atol=1e-5 ) ) else: self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(A , A , atol=1e-5 ) ) @require_torch class UpperCamelCase_ ( unittest.TestCase ): @slow def _lowercase( self ) -> List[Any]: UpperCAmelCase : str = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase : int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(A ) UpperCAmelCase : List[Any] = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase : List[str] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase : Union[str, Any] = model.generate(**A , do_sample=A , max_new_tokens=20 ) UpperCAmelCase : Tuple = tokenizer.batch_decode(A )[0] self.assertEqual(A , A )
265
0
"""simple docstring""" import collections import importlib.util import os import re from pathlib import Path __SCREAMING_SNAKE_CASE : str = """src/transformers""" # Matches is_xxx_available() __SCREAMING_SNAKE_CASE : Tuple = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __SCREAMING_SNAKE_CASE : int = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __SCREAMING_SNAKE_CASE : List[str] = re.compile(R'\s+\"\S*\":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __SCREAMING_SNAKE_CASE : Optional[int] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __SCREAMING_SNAKE_CASE : Dict = re.compile(R'^\s*_import_structure\[\"\S*\"\]\.append\(\"(\S*)\"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __SCREAMING_SNAKE_CASE : Any = re.compile('^\s+\"([^\"]+)\",') # Catches a line with objects between brackets only: ["foo", "bar"], __SCREAMING_SNAKE_CASE : List[Any] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(R'^\s*try:') # Catches a line with else: __SCREAMING_SNAKE_CASE : Union[str, Any] = re.compile(R'^\s*else:') def _a ( _SCREAMING_SNAKE_CASE ) -> Dict: if _re_test_backend.search(_lowercase ) is None: return None snake_case_ = [b[0] for b in _re_backend.findall(_lowercase )] backends.sort() return "_and_".join(_lowercase ) def _a ( _SCREAMING_SNAKE_CASE ) -> str: with open(_lowercase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: snake_case_ = f.readlines() snake_case_ = 0 while line_index < len(_lowercase ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index += 1 # If this is a traditional init, just return. if line_index >= len(_lowercase ): return None # First grab the objects without a specific backend in _import_structure snake_case_ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: snake_case_ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(_lowercase ): snake_case_ = _re_one_line_import_struct.search(_lowercase ).groups()[0] snake_case_ = re.findall("""\[([^\]]+)\]""" , _lowercase ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue snake_case_ = _re_import_struct_key_value.search(_lowercase ) if single_line_import_search is not None: snake_case_ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(_lowercase ) > 0] objects.extend(_lowercase ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 snake_case_ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
snake_case_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): snake_case_ = lines[line_index] if _re_import_struct_add_one.search(_lowercase ) is not None: objects.append(_re_import_struct_add_one.search(_lowercase ).groups()[0] ) elif _re_import_struct_add_many.search(_lowercase ) is not None: snake_case_ = _re_import_struct_add_many.search(_lowercase ).groups()[0].split(""", """ ) snake_case_ = [obj[1:-1] for obj in imports if len(_lowercase ) > 0] objects.extend(_lowercase ) elif _re_between_brackets.search(_lowercase ) is not None: snake_case_ = _re_between_brackets.search(_lowercase ).groups()[0].split(""", """ ) snake_case_ = [obj[1:-1] for obj in imports if len(_lowercase ) > 0] objects.extend(_lowercase ) elif _re_quote_object.search(_lowercase ) is not None: objects.append(_re_quote_object.search(_lowercase ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 snake_case_ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend snake_case_ = [] while ( line_index < len(_lowercase ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): snake_case_ = lines[line_index] snake_case_ = _re_import.search(_lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 snake_case_ = {"""none""": objects} # Let's continue with backend-specific objects while line_index < len(_lowercase ): # If the line is an if is_backend_available, we grab all objects associated. 
snake_case_ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: snake_case_ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 snake_case_ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): snake_case_ = lines[line_index] snake_case_ = _re_import.search(_lowercase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 snake_case_ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Union[str, Any]: def find_duplicates(_SCREAMING_SNAKE_CASE ): return [k for k, v in collections.Counter(_lowercase ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] snake_case_ = [] for key in import_dict_objects.keys(): snake_case_ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" ) snake_case_ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): snake_case_ = """base imports""" if key == """none""" else f"""{key} backend""" errors.append(f"""Differences for {name}:""" ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" ) return errors def _a ( ) -> Optional[int]: snake_case_ = [] for root, _, files in os.walk(_lowercase ): if "__init__.py" in files: snake_case_ = os.path.join(_lowercase , """__init__.py""" ) snake_case_ = parse_init(_lowercase ) if objects is not None: snake_case_ = analyze_results(*_lowercase ) if len(_lowercase ) > 0: snake_case_ = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}""" failures.append("""\n""".join(_lowercase ) ) if len(_lowercase ) > 0: raise ValueError("""\n\n""".join(_lowercase ) ) def _a ( ) -> Any: snake_case_ = [] for path, directories, files in os.walk(_lowercase ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(_lowercase ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(_lowercase ) / folder).glob("""*.py""" ) ) ) == 0: continue snake_case_ = str((Path(_lowercase ) / folder).relative_to(_lowercase ) ) snake_case_ = short_path.replace(os.path.sep , """.""" ) submodules.append(_lowercase ) for fname in files: if fname == "__init__.py": continue snake_case_ = str((Path(_lowercase ) / fname).relative_to(_lowercase ) ) snake_case_ = short_path.replace(""".py""" , """""" ).replace(os.path.sep , """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(_lowercase ) return submodules __SCREAMING_SNAKE_CASE : Tuple = [ """convert_pytorch_checkpoint_to_tf2""", 
"""modeling_flax_pytorch_utils""", ] def _a ( ) -> List[str]: # This is to make sure the transformers module imported is the one in the repo. snake_case_ = importlib.util.spec_from_file_location( """transformers""" , os.path.join(_lowercase , """__init__.py""" ) , submodule_search_locations=[PATH_TO_TRANSFORMERS] , ) snake_case_ = spec.loader.load_module() snake_case_ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(_lowercase ) > 0: snake_case_ = """\n""".join(f"""- {module}""" for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f"""{list_of_modules}\n""" """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
def actual_power(a: int, b: int) -> int:
    """Compute a**b for a non-negative exponent b using exponentiation by squaring."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)  # compute the half power once instead of recursing twice
    if b % 2 == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b for any integer exponent, falling back to 1 / a**(-b) for negative b."""
    if b < 0:
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))  # -0.125
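# Illustrative self-check (my own addition, not part of the original file):
# compare the fixed power()/actual_power() above against Python's built-in
# exponentiation on a small grid of bases and exponents.
import math

for base in (-2, 3):
    for exp in range(-4, 5):
        assert math.isclose(power(base, exp), float(base) ** exp), (base, exp)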
import gc
import random
import tempfile
import unittest

import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMInverseScheduler,
    DDIMScheduler,
    DPMSolverMultistepInverseScheduler,
    DPMSolverMultistepScheduler,
    StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin


enable_full_determinism()


class StableDiffusionDiffEditPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionDiffEditPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        inverse_scheduler = DDIMInverseScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_zero=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "inverse_scheduler": inverse_scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        mask = floats_tensor((1, 16, 16), rng=random.Random(seed)).to(device)
        latents = floats_tensor((1, 2, 4, 16, 16), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "a dog and a newt",
            "mask_image": mask,
            "image_latents": latents,
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_mask_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "source_prompt": "a cat and a frog",
            "target_prompt": "a dog and a newt",
            "generator": generator,
            "num_inference_steps": 2,
            "num_maps_per_mask": 2,
            "mask_encode_strength": 1.0,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def get_dummy_inversion_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB")
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": image,
            "prompt": "a cat and a frog",
            "generator": generator,
            "num_inference_steps": 2,
            "inpaint_strength": 1.0,
            "guidance_scale": 6.0,
            "decode_latents": True,
            "output_type": "numpy",
        }
        return inputs

    def test_save_load_optional_components(self):
        if not hasattr(self.pipeline_class, "_optional_components"):
            return
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        # set all optional components to None and update pipeline config accordingly
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)
        pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components})

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(output - output_loaded).max()
        self.assertLess(max_diff, 1e-4)

    def test_mask(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_mask_inputs(device)
        mask = pipe.generate_mask(**inputs)
        mask_slice = mask[0, -3:, -3:]

        self.assertEqual(mask.shape, (1, 16, 16))
        expected_slice = np.array([0] * 9)
        max_diff = np.abs(mask_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
        self.assertEqual(mask[0, -3, -4], 0)

    def test_inversion(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=5e-3)

    def test_inversion_dpm(self):
        device = "cpu"
        components = self.get_dummy_components()

        scheduler_args = {"beta_start": 0.00085, "beta_end": 0.012, "beta_schedule": "scaled_linear"}
        components["scheduler"] = DPMSolverMultistepScheduler(**scheduler_args)
        components["inverse_scheduler"] = DPMSolverMultistepInverseScheduler(**scheduler_args)

        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inversion_inputs(device)
        image = pipe.invert(**inputs).images
        image_slice = image[0, -1, -3:, -3:]

        self.assertEqual(image.shape, (2, 32, 32, 3))
        expected_slice = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799],
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)


@require_torch_gpu
@slow
class StableDiffusionDiffEditPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @classmethod
    def setUpClass(cls):
        raw_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png"
        )
        raw_image = raw_image.convert("RGB").resize((768, 768))
        cls.raw_image = raw_image

    def test_stable_diffusion_diffedit_full(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt, image=self.raw_image, inpaint_strength=0.7, generator=generator
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1

    def test_stable_diffusion_diffedit_dpm(self):
        generator = torch.manual_seed(0)

        pipe = StableDiffusionDiffEditPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2-1", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.inverse_scheduler = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config)
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        source_prompt = "a bowl of fruit"
        target_prompt = "a bowl of pears"

        mask_image = pipe.generate_mask(
            image=self.raw_image,
            source_prompt=source_prompt,
            target_prompt=target_prompt,
            generator=generator,
        )

        inv_latents = pipe.invert(
            prompt=source_prompt,
            image=self.raw_image,
            inpaint_strength=0.7,
            generator=generator,
            num_inference_steps=25,
        ).latents

        image = pipe(
            prompt=target_prompt,
            mask_image=mask_image,
            image_latents=inv_latents,
            generator=generator,
            negative_prompt=source_prompt,
            inpaint_strength=0.7,
            num_inference_steps=25,
            output_type="numpy",
        ).images[0]

        expected_image = (
            np.array(
                load_image(
                    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                    "/diffedit/pears.png"
                ).resize((768, 768))
            )
            / 255
        )
        assert np.abs((expected_image - image).max()) < 5e-1
'''simple docstring''' import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin a : Any = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( __magic_name__ , unittest.TestCase ): lowercase = AlbertTokenizer lowercase = AlbertTokenizerFast lowercase = True lowercase = True lowercase = True def _lowercase( self ) -> str: super().setUp() # We have a SentencePiece fixture for testing UpperCAmelCase : Optional[int] = AlbertTokenizer(A ) tokenizer.save_pretrained(self.tmpdirname ) def _lowercase( self , A ) -> int: UpperCAmelCase : Optional[int] = """this is a test""" UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def _lowercase( self ) -> int: UpperCAmelCase : Tuple = """<pad>""" UpperCAmelCase : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A ) def _lowercase( self ) -> Any: UpperCAmelCase : str = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(A ) , 30000 ) def _lowercase( self ) -> Tuple: self.assertEqual(self.get_tokenizer().vocab_size , 30000 ) def _lowercase( self ) -> Union[str, Any]: if not self.test_rust_tokenizer: return UpperCAmelCase : int = self.get_tokenizer() UpperCAmelCase : List[str] = self.get_rust_tokenizer() UpperCAmelCase : Optional[Any] = """I was born in 92000, and this is falsé.""" UpperCAmelCase : str = tokenizer.tokenize(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.tokenize(A ) self.assertListEqual(A , A ) UpperCAmelCase : Any = tokenizer.encode(A , add_special_tokens=A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A , add_special_tokens=A ) self.assertListEqual(A , A ) UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() UpperCAmelCase : Union[str, Any] = tokenizer.encode(A ) UpperCAmelCase : Optional[int] = rust_tokenizer.encode(A ) self.assertListEqual(A , A ) def _lowercase( self ) -> Any: UpperCAmelCase : List[Any] = AlbertTokenizer(A , keep_accents=A ) UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(A , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(A ) , [48, 25, 21, 1289] ) UpperCAmelCase : Optional[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) UpperCAmelCase : str = tokenizer.convert_tokens_to_ids(A ) self.assertListEqual(A , [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9] ) UpperCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(A ) self.assertListEqual( A , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : str = AlbertTokenizer(A ) UpperCAmelCase : Optional[int] = tokenizer.encode("""sequence builders""" ) UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) UpperCAmelCase : Optional[Any] = 
tokenizer.build_inputs_with_special_tokens(A ) UpperCAmelCase : List[str] = tokenizer.build_inputs_with_special_tokens(A , A ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def _lowercase( self ) -> Dict: # fmt: off UpperCAmelCase : Tuple = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21970, 13, 5, 6092, 167, 28, 7103, 2153, 673, 8, 7028, 12051, 18, 17, 7103, 2153, 673, 8, 3515, 18684, 8, 4461, 6, 1927, 297, 8, 12060, 2607, 18, 13, 5, 4461, 15, 10538, 38, 8, 135, 15, 822, 58, 15, 993, 10363, 15, 1460, 8005, 4461, 15, 993, 255, 2328, 9, 9, 9, 6, 26, 1112, 816, 3260, 13, 5, 103, 2377, 6, 17, 1112, 816, 2782, 13, 5, 103, 10641, 6, 29, 84, 2512, 2430, 782, 18684, 2761, 19, 808, 2430, 2556, 17, 855, 1480, 9477, 4091, 128, 11712, 15, 7103, 2153, 673, 17, 24883, 9990, 9, 3], [2, 11502, 25, 1006, 20, 782, 8, 11809, 855, 1732, 19393, 18667, 37, 367, 21018, 69, 1854, 34, 11860, 19124, 27, 156, 225, 17, 193, 4141, 19, 65, 9124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2231, 886, 2385, 17659, 84, 14, 16792, 1952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on 
self.tokenizer_integration_test_util( expected_encoding=A , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
def hamming_distance(string1: str, string2: str) -> int:
    """Count the positions at which two equal-length strings differ."""
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:  # compare the paired characters, not a character with itself
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
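# Illustrative usage of the fixed hamming_distance() above (my own examples,
# including the classic textbook pair):
assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("0000", "1111") == 4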
'''simple docstring''' import argparse import json import os import torch from torch import nn from transformers import NllbMoeConfig, NllbMoeModel from transformers.modeling_utils import dtype_byte_size from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME def __lowerCamelCase ( _lowercase ) -> Union[str, Any]: UpperCAmelCase : Dict = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """decoder.output_projection.weight""", """_float_tensor""", """encoder.embed_positions._float_tensor""", """decoder.embed_positions._float_tensor""", ] for k in ignore_keys: state_dict.pop(_lowercase , _lowercase ) def __lowerCamelCase ( _lowercase ) -> Tuple: UpperCAmelCase , UpperCAmelCase : List[str] = emb.weight.shape UpperCAmelCase : Dict = nn.Linear(_lowercase , _lowercase , bias=_lowercase ) UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def __lowerCamelCase ( _lowercase , _lowercase=None ) -> List[Any]: UpperCAmelCase : Dict = {} for old_key in state_dict.keys(): UpperCAmelCase : str = old_key if "moe_layer.experts." in key: if expert_idx is not None: UpperCAmelCase : int = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''' ) else: UpperCAmelCase : str = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" ) if "gate" in key: UpperCAmelCase : str = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" ) if "fc2" and "experts" not in key: UpperCAmelCase : Tuple = key.replace(""".fc2.""" , """.ffn.fc2.""" ) if "fc1" and "experts" not in key: UpperCAmelCase : List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""" ) if ".encoder_attn." in key: UpperCAmelCase : Union[str, Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""" ) if "encoder_attn_layer_norm" in key: UpperCAmelCase : str = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" ) if "final_layer_norm" in key: UpperCAmelCase : Optional[int] = key.replace("""final_layer_norm""" , """ff_layer_norm""" ) UpperCAmelCase : str = state_dict[old_key] return new_dict def __lowerCamelCase ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase = WEIGHTS_NAME ) -> Tuple: UpperCAmelCase : Any = [] UpperCAmelCase : Dict = 0 os.makedirs(_lowercase , exist_ok=_lowercase ) for expert in range(_lowercase ): UpperCAmelCase : str = switch_checkpoint_path + F'''-rank-{expert}.pt''' if os.path.isfile(_lowercase ): UpperCAmelCase : Optional[Any] = torch.load(_lowercase )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : List[str] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : Optional[Any] = os.path.join( _lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) torch.save(_lowercase , _lowercase ) sharded_state_dicts.append(expert_state.keys() ) total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size( expert_state[list(_lowercase )[0]].dtype ) # Add the last block UpperCAmelCase : Optional[Any] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{len(_lowercase )+1:05d}-of-???.bin''' ) ) UpperCAmelCase : Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""] remove_ignore_keys_(_lowercase ) UpperCAmelCase : Optional[Any] = rename_fairseq_keys(_lowercase , _lowercase ) UpperCAmelCase : int = shared_weights["""decoder.embed_tokens.weight"""] sharded_state_dicts.append(shared_weights.keys() ) # If we only have the shared weights (dummy 
model/experts saved on the same file) if len(_lowercase ) == 1: UpperCAmelCase : Dict = os.path.join(_lowercase , _lowercase ) torch.save(_lowercase , _lowercase ) return {weights_name: sharded_state_dicts[0]}, None else: torch.save(_lowercase , _lowercase ) # Otherwise, let's build the index UpperCAmelCase : Optional[int] = {} for idx, shard in enumerate(_lowercase ): UpperCAmelCase : List[str] = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(_lowercase ):05d}.bin''' ) UpperCAmelCase : List[str] = os.path.join(_lowercase , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin''' ) ) os.rename(_lowercase , os.path.join(_lowercase , _lowercase ) ) for key in shard: UpperCAmelCase : Tuple = shard_file # Add the metadata UpperCAmelCase : Any = {"""total_size""": total_size} UpperCAmelCase : Optional[Any] = {"""metadata""": metadata, """weight_map""": weight_map} with open(os.path.join(_lowercase , _lowercase ) , """w""" , encoding="""utf-8""" ) as f: UpperCAmelCase : str = json.dumps(_lowercase , indent=2 , sort_keys=_lowercase ) + """\n""" f.write(_lowercase ) return metadata, index if __name__ == "__main__": a : List[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--nllb_moe_checkpoint_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000""", type=str, required=False, help="""Path to a directory containing a folder per layer. Follows the original Google format.""", ) parser.add_argument("""--dtype""", default="""float32""", type=str, required=False, help="""dtype of the saved model""") parser.add_argument( """--pytorch_dump_folder_path""", default="""/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b""", type=str, required=False, help="""Path to the output pytorch model.""", ) a : int = parser.parse_args() a , a : Any = shard_on_the_fly( args.nllb_moe_checkpoint_path, args.pytorch_dump_folder_path, 1_2_8, args.dtype, ) a : str = NllbMoeConfig.from_pretrained( """facebook/nllb-200-3.3B""", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8 ) config.save_pretrained(args.pytorch_dump_folder_path) a : Optional[int] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path) print("""Done""") model.save_pretrained(args.pytorch_dump_folder_path)
import torch

from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce


def create_tensor(state):
    return (torch.arange(state.num_processes) + 1.0 + (state.num_processes * state.process_index)).to(state.device)


def test_gather(state):
    tensor = create_tensor(state)
    gathered_tensor = gather(tensor)
    assert gathered_tensor.tolist() == list(range(1, state.num_processes**2 + 1))


def test_gather_object(state):
    obj = [state.process_index]
    gathered_obj = gather_object(obj)
    assert len(gathered_obj) == state.num_processes, f"{gathered_obj}, {len(gathered_obj)} != {state.num_processes}"
    assert gathered_obj == list(range(state.num_processes)), f"{gathered_obj} != {list(range(state.num_processes))}"


def test_broadcast(state):
    tensor = create_tensor(state)
    broadcasted_tensor = broadcast(tensor)
    assert broadcasted_tensor.shape == torch.Size([state.num_processes])
    assert broadcasted_tensor.tolist() == list(range(1, state.num_processes + 1))


def test_pad_across_processes(state):
    # We need to pad the tensor with one more element if we are the main process
    # to ensure that we can pad
    if state.is_main_process:
        tensor = torch.arange(state.num_processes + 1).to(state.device)
    else:
        tensor = torch.arange(state.num_processes).to(state.device)
    padded_tensor = pad_across_processes(tensor)
    assert padded_tensor.shape == torch.Size([state.num_processes + 1])
    if not state.is_main_process:
        assert padded_tensor.tolist() == list(range(0, state.num_processes)) + [0]


def test_reduce_sum(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "sum")
    truth_tensor = torch.tensor([4.0, 6]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def test_reduce_mean(state):
    # For now runs on only two processes
    if state.num_processes != 2:
        return
    tensor = create_tensor(state)
    reduced_tensor = reduce(tensor, "mean")
    truth_tensor = torch.tensor([2.0, 3]).to(state.device)
    assert torch.allclose(reduced_tensor, truth_tensor), f"{reduced_tensor} != {truth_tensor}"


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


def main():
    state = PartialState()
    state.print(f"State: {state}")

    state.print("testing gather")
    test_gather(state)
    state.print("testing gather_object")
    test_gather_object(state)
    state.print("testing broadcast")
    test_broadcast(state)
    state.print("testing pad_across_processes")
    test_pad_across_processes(state)
    state.print("testing reduce_sum")
    test_reduce_sum(state)
    state.print("testing reduce_mean")
    test_reduce_mean(state)


if __name__ == "__main__":
    main()
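# Usage note: these collective-op checks only become meaningful with more than one
# process; run single-process they pass trivially. A hypothetical invocation (the
# script filename is assumed) is: `accelerate launch --num_processes 2 test_ops.py`.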
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() a : List[Any] = logging.get_logger(__name__) a : Union[str, Any] = torch.device("""cpu""") def __lowerCamelCase ( ) -> Any: UpperCAmelCase : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" UpperCAmelCase : Dict = Image.open(requests.get(_lowercase , stream=_lowercase ).raw ) return im def __lowerCamelCase ( _lowercase ) -> Dict: if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_703e00, 2.1_107e00, -2.0_811e00, 8.8_685e-01, 2.4_360e-01] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_636e-01, 2.3_478e-01, -1.6_963e00, -1.7_381e00, -8.6_337e-01] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_768e-01, -4.7_429e-01, -1.0_897e00, -1.0_248e00, 3.5_523e-02] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_330e-01, 2.4_211e-01, -6.0_185e-01, -8.2_789e-01, -6.0_446e-02] ) def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> str: UpperCAmelCase : Union[str, Any] = dct.pop(_lowercase ) UpperCAmelCase : str = val def __lowerCamelCase ( _lowercase ) -> List[str]: UpperCAmelCase : Tuple = [] for k in state_dict.keys(): UpperCAmelCase : Dict = k if ".pwconv" in k: UpperCAmelCase : Union[str, Any] = k_new.replace(""".pwconv""" , """.point_wise_conv""" ) if ".dwconv" in k: UpperCAmelCase : Dict = k_new.replace(""".dwconv""" , """.depth_wise_conv""" ) if ".Proj." in k: UpperCAmelCase : str = k_new.replace(""".Proj.""" , """.proj.""" ) if "patch_embed" in k_new: UpperCAmelCase : Dict = k_new.replace("""patch_embed""" , """swiftformer.patch_embed.patch_embedding""" ) if "network" in k_new: UpperCAmelCase : Optional[Any] = k_new.split(""".""" ) if ls[2].isdigit(): UpperCAmelCase : Any = """swiftformer.encoder.network.""" + ls[1] + """.blocks.""" + ls[2] + """.""" + """.""".join(ls[3:] ) else: UpperCAmelCase : Any = k_new.replace("""network""" , """swiftformer.encoder.network""" ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCamelCase ( _lowercase , _lowercase , _lowercase ) -> Optional[int]: UpperCAmelCase : Optional[Any] = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size UpperCAmelCase : List[Any] = 1_0_0_0 UpperCAmelCase : List[str] = """huggingface/label-files""" UpperCAmelCase : Tuple = """imagenet-1k-id2label.json""" UpperCAmelCase : Dict = json.load(open(hf_hub_download(_lowercase , _lowercase , repo_type="""dataset""" ) , """r""" ) ) UpperCAmelCase : Tuple = {int(_lowercase ): v for k, v in idalabel.items()} UpperCAmelCase : Tuple = idalabel UpperCAmelCase : List[Any] = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": UpperCAmelCase : List[Any] = [3, 3, 6, 4] UpperCAmelCase : int = [4_8, 5_6, 1_1_2, 2_2_0] elif swiftformer_name == "swiftformer_s": UpperCAmelCase : str = [3, 3, 9, 6] UpperCAmelCase : str = [4_8, 6_4, 1_6_8, 2_2_4] elif swiftformer_name == "swiftformer_l1": UpperCAmelCase : List[Any] = [4, 3, 1_0, 5] UpperCAmelCase : Union[str, Any] = [4_8, 9_6, 1_9_2, 3_8_4] elif swiftformer_name == "swiftformer_l3": UpperCAmelCase : Any = [4, 4, 1_2, 6] UpperCAmelCase : List[Any] = [6_4, 1_2_8, 
3_2_0, 5_1_2] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith("""https""" ): UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_lowercase , map_location="""cpu""" , check_hash=_lowercase ) else: UpperCAmelCase : Dict = torch.load(_lowercase , map_location="""cpu""" ) UpperCAmelCase : str = checkpoint UpperCAmelCase : Tuple = create_rename_keys(_lowercase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_lowercase , _lowercase , _lowercase ) # load HuggingFace model UpperCAmelCase : str = SwiftFormerForImageClassification(_lowercase ).eval() hf_model.load_state_dict(_lowercase ) # prepare test inputs UpperCAmelCase : Any = prepare_img() UpperCAmelCase : List[Any] = ViTImageProcessor.from_pretrained("""preprocessor_config""" ) UpperCAmelCase : List[str] = processor(images=_lowercase , return_tensors="""pt""" ) # compare outputs from both models UpperCAmelCase : List[str] = get_expected_output(_lowercase ) UpperCAmelCase : Dict = hf_model(inputs["""pixel_values"""] ).logits assert hf_logits.shape == torch.Size([1, 1_0_0_0] ) assert torch.allclose(hf_logits[0, 0:5] , _lowercase , atol=1e-3 ) Path(_lowercase ).mkdir(exist_ok=_lowercase ) print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' ) hf_model.save_pretrained(_lowercase ) if __name__ == "__main__": a : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") a : Union[str, Any] = parser.parse_args() convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)