Dataset schema (one row = five fields, in this order):

    column                    dtype    observed range
    code                      string   length 86 – 54.5k
    code_codestyle            int64    0 – 371
    style_context             string   length 87 – 49.2k
    style_context_codestyle   int64    0 – 349
    label                     int64    0 – 1
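A minimal sketch of how rows with this schema could be loaded and inspected with the Hugging Face `datasets` library. It assumes the rows are stored as a local JSON Lines file; `codestyle_rows.jsonl` is a hypothetical path, since this dump does not name the dataset or its location:

```python
# Hedged sketch: load and inspect rows matching the schema above.
# "codestyle_rows.jsonl" is an assumed file name, not given by the source.
from datasets import load_dataset

ds = load_dataset("json", data_files="codestyle_rows.jsonl", split="train")

row = ds[0]
print(row["code"][:100])               # a flattened Python source sample (string)
print(row["code_codestyle"])           # integer style id for `code` (0-371 in this dump)
print(row["style_context"][:100])      # a second source sample serving as style context
print(row["style_context_codestyle"])  # integer style id for `style_context` (0-349)
print(row["label"])                    # binary label (0 or 1)
```

The sampled rows follow, one field per line in schema order.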
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
code_codestyle: 320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
style_context_codestyle: 320
label: 1
"""simple docstring""" class lowerCamelCase__ : # Public class to implement a graph """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' __UpperCAmelCase : List[str] = row __UpperCAmelCase : List[str] = col __UpperCAmelCase : List[Any] = graph def lowerCamelCase__ ( self : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' return ( 0 <= i < self.ROW and 0 <= j < self.COL and not visited[i][j] and self.graph[i][j] ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : list[list[bool]] ): '''simple docstring''' __UpperCAmelCase : int = [-1, -1, -1, 0, 0, 1, 1, 1] # Coordinate order __UpperCAmelCase : Optional[Any] = [-1, 0, 1, -1, 1, -1, 0, 1] __UpperCAmelCase : int = True # Make those cells visited for k in range(8 ): if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ): self.diffs(i + row_nbr[k] , j + col_nbr[k] , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): # And finally, count all islands. '''simple docstring''' __UpperCAmelCase : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )] __UpperCAmelCase : Optional[Any] = 0 for i in range(self.ROW ): for j in range(self.COL ): if visited[i][j] is False and self.graph[i][j] == 1: self.diffs(UpperCamelCase , UpperCamelCase , UpperCamelCase ) count += 1 return count
code_codestyle: 320
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
style_context_codestyle: 320
label: 1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_deit import DeiTImageProcessor UpperCAmelCase : Dict = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : int , *UpperCamelCase : Any , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DeiTImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
code_codestyle: 320
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
style_context_codestyle: 320
label: 1
"""simple docstring""" # Lint as: python3 # pylint: enable=line-too-long # pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position UpperCAmelCase : Dict = '2.13.1' import platform import pyarrow from packaging import version if version.parse(platform.python_version()) < version.parse('3.7'): raise ImportWarning( 'To use `datasets`, Python>=3.7 is required, and the current version of Python doesn\'t match this condition.' ) if version.parse(pyarrow.__version__).major < 8: raise ImportWarning( 'To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn\'t match this condition.\n' 'If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`.' ) del platform del pyarrow del version from .arrow_dataset import Dataset from .arrow_reader import ReadInstruction from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder from .combine import concatenate_datasets, interleave_datasets from .dataset_dict import DatasetDict, IterableDatasetDict from .download import * from .features import * from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled from .info import DatasetInfo, MetricInfo from .inspect import ( get_dataset_config_info, get_dataset_config_names, get_dataset_infos, get_dataset_split_names, inspect_dataset, inspect_metric, list_datasets, list_metrics, ) from .iterable_dataset import IterableDataset from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric from .metric import Metric from .splits import ( NamedSplit, NamedSplitAll, Split, SplitBase, SplitDict, SplitGenerator, SplitInfo, SubSplitInfo, percent, ) from .tasks import * from .utils import * from .utils import logging # deprecated modules from datasets import arrow_dataset as _arrow_dataset # isort:skip from datasets import utils as _utils # isort:skip from datasets.utils import download_manager as _deprecated_download_manager # isort:skip UpperCAmelCase : List[str] = concatenate_datasets UpperCAmelCase : Optional[Any] = DownloadConfig UpperCAmelCase : Tuple = DownloadManager UpperCAmelCase : List[str] = DownloadMode UpperCAmelCase : Dict = DownloadConfig UpperCAmelCase : Tuple = DownloadMode UpperCAmelCase : List[Any] = DownloadManager del _arrow_dataset, _utils, _deprecated_download_manager
code_codestyle: 320
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
style_context_codestyle: 320
label: 1
"""simple docstring""" from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : List[str] , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , **UpperCamelCase : Dict , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : List[Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = crop_size if crop_size is not None else {"""height""": 256, """width""": 256} __UpperCAmelCase : Tuple = get_size_dict(UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : Dict = do_resize __UpperCAmelCase : str = size __UpperCAmelCase : int = resample __UpperCAmelCase : Any = do_rescale __UpperCAmelCase : Union[str, Any] = rescale_factor __UpperCAmelCase : Optional[Any] = do_center_crop __UpperCAmelCase : Optional[int] = crop_size __UpperCAmelCase : Tuple = do_flip_channel_order def lowerCamelCase__ ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PIL.Image.BILINEAR , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : Any = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None ): '''simple docstring''' return flip_channel_order(UpperCamelCase , data_format=UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : int = resample if resample is not None else self.resample __UpperCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : List[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : List[str] = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) __UpperCAmelCase : List[Any] = size if size is not None else self.size __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : Optional[int] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) # All transformations expect numpy arrays. 
__UpperCAmelCase : int = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Dict = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : Optional[int] = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : int = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: __UpperCAmelCase : Optional[Any] = [self.flip_channel_order(image=UpperCamelCase ) for image in images] __UpperCAmelCase : Dict = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Tuple = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Tuple] = None ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(UpperCamelCase ) != len(UpperCamelCase ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(UpperCamelCase ): __UpperCAmelCase : List[str] = target_sizes.numpy() __UpperCAmelCase : Dict = [] for idx in range(len(UpperCamelCase ) ): __UpperCAmelCase : List[str] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(UpperCamelCase ) else: __UpperCAmelCase : Union[str, Any] = logits.argmax(dim=1 ) __UpperCAmelCase : List[str] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
code_codestyle: 320
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
style_context_codestyle: 320
label: 1
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=A ) class lowerCamelCase__ ( A ): """simple docstring""" __a = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __a = Features({"""audio""": Audio()} ) __a = Features({"""transcription""": Value("""string""" )} ) __a = "audio" __a = "transcription" def lowerCamelCase__ ( self : Any , UpperCamelCase : int ): '''simple docstring''' if self.audio_column not in features: raise ValueError(f'''Column {self.audio_column} is not present in features.''' ) if not isinstance(features[self.audio_column] , UpperCamelCase ): raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' ) __UpperCAmelCase : Tuple = copy.deepcopy(self ) __UpperCAmelCase : Union[str, Any] = self.input_schema.copy() __UpperCAmelCase : Optional[int] = features[self.audio_column] __UpperCAmelCase : Optional[Any] = input_schema return task_template @property def lowerCamelCase__ ( self : Any ): '''simple docstring''' return {self.audio_column: "audio", self.transcription_column: "transcription"}
code_codestyle: 320
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
style_context_codestyle: 320
label: 1
"""simple docstring""" import unittest from transformers import AlbertTokenizer, AlbertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase : str = get_tests_dir('fixtures/spiece.model') @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = AlbertTokenizer __a = AlbertTokenizerFast __a = True __a = True __a = True def lowerCamelCase__ ( self : str ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : List[str] = AlbertTokenizer(UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Dict = """this is a test""" __UpperCAmelCase : Dict = """this is a test""" return input_text, output_text def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = """<pad>""" __UpperCAmelCase : List[str] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """▁eloquent""" ) self.assertEqual(len(UpperCamelCase ) , 30_000 ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 30_000 ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' if not self.test_rust_tokenizer: return __UpperCAmelCase : Union[str, Any] = self.get_tokenizer() __UpperCAmelCase : List[Any] = self.get_rust_tokenizer() __UpperCAmelCase : int = """I was born in 92000, and this is falsé.""" __UpperCAmelCase : int = tokenizer.tokenize(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) __UpperCAmelCase : Optional[int] = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = self.get_rust_tokenizer() __UpperCAmelCase : Tuple = tokenizer.encode(UpperCamelCase ) __UpperCAmelCase : List[Any] = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Dict = AlbertTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase , ["""▁this""", """▁is""", """▁a""", """▁test"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [48, 25, 21, 1_289] ) __UpperCAmelCase : str = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] ) __UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase ) 
self.assertListEqual(UpperCamelCase , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] ) __UpperCAmelCase : Dict = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual( UpperCamelCase , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = AlbertTokenizer(UpperCamelCase ) __UpperCAmelCase : Tuple = tokenizer.encode("""sequence builders""" ) __UpperCAmelCase : Any = tokenizer.encode("""multi-sequence build""" ) __UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(UpperCamelCase ) __UpperCAmelCase : int = tokenizer.build_inputs_with_special_tokens(UpperCamelCase , UpperCamelCase ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ] @slow def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[str] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
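Editor's note: a quick round-trip sketch for the Base64 pair above. In this row both functions were renamed to lowerCamelCase, so the second definition shadows the first; the names base64_encode/base64_decode below are assumptions for illustration only, checked against the standard library.

# Hypothetical round-trip check, assuming the two functions above kept
# distinct names (base64_encode / base64_decode). The stdlib codec serves
# as the reference implementation.
import base64

data = b"Hello, World!"
encoded = base64.b64encode(data)     # b'SGVsbG8sIFdvcmxkIQ=='
decoded = base64.b64decode(encoded)  # b'Hello, World!'
assert decoded == data
# base64_encode(data) == encoded and base64_decode(encoded) == data
# should hold for the hand-rolled versions as well.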
"""simple docstring""" import inspect import jax import jax.lax as lax import jax.numpy as jnp from ..utils import add_start_docstrings from ..utils.logging import get_logger UpperCAmelCase : Optional[Any] = get_logger(__name__) UpperCAmelCase : Tuple = R'\n Args:\n input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):\n Indices of input sequence tokens in the vocabulary.\n\n Indices can be obtained using [`PreTrainedTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n [`PreTrainedTokenizer.__call__`] for details.\n\n [What are input IDs?](../glossary#input-ids)\n scores (`jnp.ndarray` of shape `(batch_size, config.vocab_size)`):\n Prediction scores of a language modeling head. These can be logits for each vocabulary when not using beam\n search or log softmax for each vocabulary token when using beam search\n kwargs (`Dict[str, Any]`, *optional*):\n Additional logits processor specific kwargs.\n\n Return:\n `jnp.ndarray` of shape `(batch_size, config.vocab_size)`: The processed prediction scores.\n\n' class lowerCamelCase__ : """simple docstring""" @add_start_docstrings(UpperCamelCase ) def __call__( self : Tuple , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray ): '''simple docstring''' raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowerCamelCase__ : """simple docstring""" @add_start_docstrings(UpperCamelCase ) def __call__( self : Any , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray ): '''simple docstring''' raise NotImplementedError( f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' ) class lowerCamelCase__ ( A ): """simple docstring""" @add_start_docstrings(UpperCamelCase ) def __call__( self : List[Any] , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int , **UpperCamelCase : int ): '''simple docstring''' for processor in self: __UpperCAmelCase : str = inspect.signature(processor.__call__ ).parameters if len(UpperCamelCase ) > 3: if not all(arg in kwargs for arg in list(function_args.keys() )[2:] ): raise ValueError( f'''Make sure that all the required parameters: {list(function_args.keys() )} for ''' f'''{processor.__class__} are passed to the logits processor.''' ) __UpperCAmelCase : Any = processor(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ) else: __UpperCAmelCase : str = processor(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : float ): '''simple docstring''' if not isinstance(UpperCamelCase , UpperCamelCase ) or not (temperature > 0): raise ValueError(f'''`temperature` has to be a strictly positive float, but is {temperature}''' ) __UpperCAmelCase : Any = temperature def __call__( self : Optional[Any] , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Tuple = scores / self.temperature return scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : str , UpperCamelCase : float , UpperCamelCase : float = -float("""Inf""" ) , UpperCamelCase : int = 1 ): '''simple docstring''' if not isinstance(UpperCamelCase , UpperCamelCase ) or (top_p < 0 or top_p > 1.0): raise ValueError(f'''`top_p` has to be a float > 0 and < 1, but is {top_p}''' ) if not isinstance(UpperCamelCase , UpperCamelCase ) or (min_tokens_to_keep < 1): raise 
ValueError(f'''`min_tokens_to_keep` has to be a positive integer, but is {min_tokens_to_keep}''' ) __UpperCAmelCase : Any = top_p __UpperCAmelCase : int = filter_value __UpperCAmelCase : List[str] = min_tokens_to_keep def __call__( self : Optional[Any] , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : List[Any] = lax.top_k(UpperCamelCase , scores.shape[-1] ) __UpperCAmelCase : List[str] = jnp.full_like(UpperCamelCase , self.filter_value ) __UpperCAmelCase : List[Any] = jax.nn.softmax(UpperCamelCase , axis=-1 ).cumsum(axis=-1 ) __UpperCAmelCase : int = cumulative_probs < self.top_p # include the token that is higher than top_p as well __UpperCAmelCase : Dict = jnp.roll(UpperCamelCase , 1 ) score_mask |= score_mask.at[:, 0].set(UpperCamelCase ) # min tokens to keep __UpperCAmelCase : int = score_mask.at[:, : self.min_tokens_to_keep].set(UpperCamelCase ) __UpperCAmelCase : Dict = jnp.where(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[str] = jax.lax.sort_key_val(UpperCamelCase , UpperCamelCase )[-1] return next_scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : float = -float("""Inf""" ) , UpperCamelCase : int = 1 ): '''simple docstring''' if not isinstance(UpperCamelCase , UpperCamelCase ) or top_k <= 0: raise ValueError(f'''`top_k` has to be a strictly positive integer, but is {top_k}''' ) __UpperCAmelCase : Dict = max(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Tuple = filter_value def __call__( self : Tuple , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = scores.shape __UpperCAmelCase : Any = jnp.full(batch_size * vocab_size , self.filter_value ) __UpperCAmelCase : Union[str, Any] = min(self.top_k , scores.shape[-1] ) # Safety check __UpperCAmelCase ,__UpperCAmelCase : List[Any] = lax.top_k(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Tuple = jnp.broadcast_to((jnp.arange(UpperCamelCase ) * vocab_size)[:, None] , (batch_size, topk) ).flatten() __UpperCAmelCase : str = topk_scores.flatten() __UpperCAmelCase : Tuple = topk_indices.flatten() + shift __UpperCAmelCase : str = next_scores_flat.at[topk_indices_flat].set(UpperCamelCase ) __UpperCAmelCase : Tuple = next_scores_flat.reshape(UpperCamelCase , UpperCamelCase ) return next_scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : str = bos_token_id def __call__( self : List[str] , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Tuple = jnp.full(scores.shape , -float("""inf""" ) ) __UpperCAmelCase : Optional[int] = 1 - jnp.bool_(cur_len - 1 ) __UpperCAmelCase : Optional[int] = jnp.where(UpperCamelCase , new_scores.at[:, self.bos_token_id].set(0 ) , UpperCamelCase ) return scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : List[str] = max_length __UpperCAmelCase : Optional[Any] = eos_token_id def __call__( self : Optional[int] , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Any = jnp.full(scores.shape , 
-float("""inf""" ) ) __UpperCAmelCase : Union[str, Any] = 1 - jnp.bool_(cur_len - self.max_length + 1 ) __UpperCAmelCase : Optional[Any] = jnp.where(UpperCamelCase , new_scores.at[:, self.eos_token_id].set(0 ) , UpperCamelCase ) return scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' if not isinstance(UpperCamelCase , UpperCamelCase ) or min_length < 0: raise ValueError(f'''`min_length` has to be a positive integer, but is {min_length}''' ) if not isinstance(UpperCamelCase , UpperCamelCase ) or eos_token_id < 0: raise ValueError(f'''`eos_token_id` has to be a positive integer, but is {eos_token_id}''' ) __UpperCAmelCase : Any = min_length __UpperCAmelCase : Optional[Any] = eos_token_id def __call__( self : Any , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : int = 1 - jnp.clip(cur_len - self.min_length , 0 , 1 ) __UpperCAmelCase : Any = jnp.where(UpperCamelCase , scores.at[:, self.eos_token_id].set(-float("""inf""" ) ) , UpperCamelCase ) return scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = list(UpperCamelCase ) __UpperCAmelCase : Optional[int] = begin_index def __call__( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Tuple = 1 - jnp.bool_(cur_len - self.begin_index ) __UpperCAmelCase : Tuple = jnp.where(UpperCamelCase , scores.at[:, self.begin_suppress_tokens].set(-float("""inf""" ) ) , UpperCamelCase ) return scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase : list ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = list(UpperCamelCase ) def __call__( self : Union[str, Any] , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : int = scores.at[..., self.suppress_tokens].set(-float("""inf""" ) ) return scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Any , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = dict(UpperCamelCase ) # Converts the dictionary of format {index: token} containing the tokens to be forced to an array, where the # index of the array corresponds to the index of the token to be forced, for XLA compatibility. # Indexes without forced tokens will have a negative value. 
__UpperCAmelCase : str = jnp.ones((max(force_token_map.keys() ) + 1) , dtype=jnp.intaa ) * -1 for index, token in force_token_map.items(): if token is not None: __UpperCAmelCase : Optional[Any] = force_token_array.at[index].set(UpperCamelCase ) __UpperCAmelCase : Optional[int] = jnp.intaa(UpperCamelCase ) def __call__( self : Tuple , UpperCamelCase : jnp.ndarray , UpperCamelCase : jnp.ndarray , UpperCamelCase : int ): '''simple docstring''' def _force_token(UpperCamelCase : Optional[int] ): __UpperCAmelCase : Tuple = scores.shape[0] __UpperCAmelCase : Optional[int] = self.force_token_array[generation_idx] __UpperCAmelCase : Tuple = jnp.ones_like(UpperCamelCase , dtype=scores.dtype ) * -float("""inf""" ) __UpperCAmelCase : Union[str, Any] = jnp.zeros((batch_size, 1) , dtype=scores.dtype ) __UpperCAmelCase : Tuple = lax.dynamic_update_slice(UpperCamelCase , UpperCamelCase , (0, current_token) ) return new_scores __UpperCAmelCase : str = lax.cond( cur_len >= self.force_token_array.shape[0] , lambda: scores , lambda: lax.cond( self.force_token_array[cur_len] >= 0 , lambda: _force_token(UpperCamelCase ) , lambda: scores , ) , ) return scores class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = generate_config.eos_token_id __UpperCAmelCase : Optional[Any] = generate_config.no_timestamps_token_id __UpperCAmelCase : str = generate_config.no_timestamps_token_id + 1 __UpperCAmelCase : Dict = decoder_input_length + 1 if generate_config.is_multilingual: # room for language token and task token self.begin_index += 2 if hasattr(UpperCamelCase , """max_initial_timestamp_index""" ): __UpperCAmelCase : Optional[Any] = generate_config.max_initial_timestamp_index else: __UpperCAmelCase : int = model_config.vocab_size if self.max_initial_timestamp_index is None: __UpperCAmelCase : Union[str, Any] = model_config.vocab_size def __call__( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : List[str] = scores.at[:, self.no_timestamps_token_id].set(-float("""inf""" ) ) def handle_pairs(UpperCamelCase : int , UpperCamelCase : Union[str, Any] ): __UpperCAmelCase : Optional[Any] = jnp.where((cur_len - self.begin_index) >= 1 , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = jnp.where( input_ids_k[cur_len - 1] >= self.timestamp_begin , True and last_was_timestamp , UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = jnp.where((cur_len - self.begin_index) < 2 , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = jnp.where( input_ids_k[cur_len - 2] >= self.timestamp_begin , UpperCamelCase , UpperCamelCase , ) return jnp.where( UpperCamelCase , jnp.where( penultimate_was_timestamp > 0 , scores_k.at[self.timestamp_begin :].set(-float("""inf""" ) ) , scores_k.at[: self.eos_token_id].set(-float("""inf""" ) ) , ) , UpperCamelCase , ) __UpperCAmelCase : List[str] = jax.vmap(UpperCamelCase )(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = jnp.where(cur_len == self.begin_index , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = jnp.where( self.max_initial_timestamp_index is not None , True and apply_max_initial_timestamp , UpperCamelCase , ) __UpperCAmelCase : str = self.timestamp_begin + self.max_initial_timestamp_index __UpperCAmelCase : Tuple = jnp.where( UpperCamelCase , scores.at[:, last_allowed + 1 
:].set(-float("""inf""" ) ) , UpperCamelCase , ) # if sum of probability over timestamps is above any other token, sample timestamp __UpperCAmelCase : Union[str, Any] = jax.nn.log_softmax(UpperCamelCase , axis=-1 ) def handle_cumulative_probs(UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict ): __UpperCAmelCase : Dict = jax.nn.logsumexp(logprobs_k[self.timestamp_begin :] , axis=-1 ) __UpperCAmelCase : Optional[Any] = jnp.max(logprobs_k[: self.timestamp_begin] ) return jnp.where( timestamp_logprob > max_text_token_logprob , scores_k.at[: self.timestamp_begin].set(-float("""inf""" ) ) , UpperCamelCase , ) __UpperCAmelCase : Tuple = jax.vmap(UpperCamelCase )(UpperCamelCase , UpperCamelCase ) return scores
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
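Editor's note: for readability, here is a de-obfuscated sketch of the nucleus (top-p) filtering step implemented by the warper class above. The function and variable names are assumed for illustration; the logic mirrors the obfuscated body, including the roll-by-one that keeps the first token past the cutoff.

import jax
import jax.numpy as jnp
from jax import lax

def top_p_filter(scores, top_p=0.9, filter_value=-float("inf"), min_tokens_to_keep=1):
    # Sort the full vocabulary by score, descending.
    topk_scores, topk_indices = lax.top_k(scores, scores.shape[-1])
    mask_scores = jnp.full_like(scores, filter_value)
    cumulative_probs = jax.nn.softmax(topk_scores, axis=-1).cumsum(axis=-1)
    score_mask = cumulative_probs < top_p
    # Include the first token that crosses top_p as well.
    score_mask = jnp.roll(score_mask, 1)
    score_mask |= score_mask.at[:, 0].set(True)
    # Always keep at least min_tokens_to_keep tokens.
    score_mask = score_mask.at[:, :min_tokens_to_keep].set(True)
    topk_next_scores = jnp.where(score_mask, topk_scores, mask_scores)
    # Undo the sort: sorting the indices ascending restores vocab order.
    return lax.sort_key_val(topk_indices, topk_next_scores)[-1]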
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
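Editor's note: the __init__.py in the row above follows the standard _LazyModule pattern, obscured here because the import-structure dict is assigned to a renamed variable. A generic sketch with the original names restored; "tokenization_bartpho" and "BartphoTokenizer" come from the source, the surrounding shape is the usual convention.

# De-obfuscated sketch of the lazy-import pattern above.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass  # the tokenizer is simply not exported when sentencepiece is missing
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]

if TYPE_CHECKING:
    pass  # eager imports for type checkers go here
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)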
"""simple docstring""" from typing import Dict, List from nltk.translate import gleu_score import datasets from datasets import MetricInfo UpperCAmelCase : Any = '\\n@misc{wu2016googles,\n title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n' UpperCAmelCase : Tuple = '\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe \'GLEU score\'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore\'s range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n' UpperCAmelCase : int = '\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n \'google_bleu\': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... 
\'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results["google_bleu"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results["google_bleu"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',\n ... 
\'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']\n >>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',\n ... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',\n ... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']\n >>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',\n ... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',\n ... \'heed\', \'the\', \'cat\', \'commands\']\n >>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',\n ... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',\n ... \'of\', \'the\', \'cat\']\n\n >>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',\n ... \'interested\', \'in\', \'world\', \'history\']\n >>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',\n ... \'because\', \'he\', \'read\', \'the\', \'book\']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric("google_bleu")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results["google_bleu"], 2))\n 0.4\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ), """references""": datasets.Sequence( datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ), } ) , ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[List[List[str]]] , UpperCamelCase : List[List[str]] , UpperCamelCase : int = 1 , UpperCamelCase : int = 4 , ): '''simple docstring''' return { "google_bleu": gleu_score.corpus_gleu( list_of_references=UpperCamelCase , hypotheses=UpperCamelCase , min_len=UpperCamelCase , max_len=UpperCamelCase ) }
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
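Editor's note: a short usage sketch of the NLTK scorer that the metric class above delegates to; the toy tokenized hypothesis/reference pair is made up for illustration.

# Corpus-level GLEU via NLTK, the same call used in the metric's _compute.
from nltk.translate.gleu_score import corpus_gleu

hypotheses = [["the", "cat", "sat", "on", "the", "mat"]]
list_of_references = [[["the", "cat", "is", "on", "the", "mat"]]]

score = corpus_gleu(list_of_references, hypotheses, min_len=1, max_len=4)
print(round(score, 2))  # a value in [0, 1]: min of n-gram precision and recall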
"""simple docstring""" import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' pass @is_pipeline_test @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) __UpperCAmelCase : Optional[Any] = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Dict = vqa_pipeline(UpperCamelCase , top_k=1 ) self.assertEqual( UpperCamelCase , [ [{"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}], [{"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}], ] , ) @require_torch def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) __UpperCAmelCase : Any = """./tests/fixtures/tests_samples/COCO/000000039769.png""" __UpperCAmelCase : Union[str, Any] = """How many cats are there?""" __UpperCAmelCase : List[str] = vqa_pipeline(image=UpperCamelCase , question="""How many cats are there?""" , top_k=2 ) self.assertEqual( UpperCamelCase , [{"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}, {"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}] ) __UpperCAmelCase : Union[str, Any] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( UpperCamelCase , [{"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}, {"""score""": ANY(UpperCamelCase ), """answer""": ANY(UpperCamelCase )}] ) @slow @require_torch def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" ) __UpperCAmelCase : Tuple = """./tests/fixtures/tests_samples/COCO/000000039769.png""" __UpperCAmelCase : Any = """How many cats are there?""" __UpperCAmelCase : List[Any] = vqa_pipeline(image=UpperCamelCase , question=UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}] ) __UpperCAmelCase : List[Any] = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, 
"""answer""": """1"""}] ) __UpperCAmelCase : Union[str, Any] = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [[{"""score""": 0.8799, """answer""": """2"""}, {"""score""": 0.296, """answer""": """1"""}]] * 2 , ) @require_tf @unittest.skip("""Visual question answering not implemented in TF""" ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' pass
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
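Editor's note: the two helpers in the row above compute a * b by binary decomposition of b (Russian peasant multiplication), plain and modular. A readable rendering with assumed names, since the obfuscation hides the intent:

# Readable sketch of the two obfuscated helpers; assumes b >= 0.

def multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:      # add the current power-of-two multiple of a
            res += a
        a += a         # double a
        b >>= 1        # consume one bit of b
    return res


def multiply_mod(a: int, b: int, c: int) -> int:
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res


assert multiply(6, 7) == 42
assert multiply_mod(6, 7, 5) == 42 % 5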
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__) class lowerCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __a = """timm_backbone""" def __init__( self : Optional[Any] , UpperCamelCase : List[str]=None , UpperCamelCase : int=3 , UpperCamelCase : Tuple=True , UpperCamelCase : Tuple=True , UpperCamelCase : Tuple=None , **UpperCamelCase : Tuple , ): '''simple docstring''' super().__init__(**__a ) __UpperCAmelCase : Any = backbone __UpperCAmelCase : Union[str, Any] = num_channels __UpperCAmelCase : Optional[int] = features_only __UpperCAmelCase : str = use_pretrained_backbone __UpperCAmelCase : Any = True __UpperCAmelCase : Union[str, Any] = out_indices if out_indices is not None else (-1,)
350
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from tokenizers import processors from ...tokenization_utils import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_mbart import MBartTokenizer else: UpperCAmelCase : List[Any] = None UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase : List[str] = {'vocab_file': 'sentencepiece.bpe.model', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : Optional[Any] = { 'vocab_file': { 'facebook/mbart-large-en-ro': ( 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model' ), 'facebook/mbart-large-cc25': ( 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model' ), }, 'tokenizer_file': { 'facebook/mbart-large-en-ro': 'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json', 'facebook/mbart-large-cc25': 'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json', }, } UpperCAmelCase : Dict = { 'facebook/mbart-large-en-ro': 1024, 'facebook/mbart-large-cc25': 1024, } # fmt: off UpperCAmelCase : Union[str, Any] = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN'] class lowerCamelCase__ ( A__ ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = PRETRAINED_VOCAB_FILES_MAP __a = ["input_ids", "attention_mask"] __a = MBartTokenizer __a = [] __a = [] def __init__( self : int , UpperCamelCase : Optional[Any]=None , UpperCamelCase : int=None , UpperCamelCase : List[Any]="<s>" , UpperCamelCase : List[Any]="</s>" , UpperCamelCase : List[str]="</s>" , UpperCamelCase : Dict="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : Any="<pad>" , UpperCamelCase : str="<mask>" , UpperCamelCase : Dict=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : Union[str, Any]=None , **UpperCamelCase : Tuple , ): '''simple docstring''' __UpperCAmelCase : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token super().__init__( vocab_file=__A , tokenizer_file=__A , bos_token=__A , eos_token=__A , sep_token=__A , cls_token=__A , unk_token=__A , pad_token=__A , mask_token=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , **__A , ) __UpperCAmelCase : Any = vocab_file __UpperCAmelCase : Any = False if not self.vocab_file else True __UpperCAmelCase : List[Any] = FAIRSEQ_LANGUAGE_CODES.copy() if additional_special_tokens is not None: # Only add those special tokens if they are not already there. 
_additional_special_tokens.extend( [t for t in additional_special_tokens if t not in _additional_special_tokens] ) self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} ) __UpperCAmelCase : Union[str, Any] = { lang_code: self.convert_tokens_to_ids(__A ) for lang_code in FAIRSEQ_LANGUAGE_CODES } __UpperCAmelCase : List[str] = src_lang if src_lang is not None else """en_XX""" __UpperCAmelCase : Tuple = self.convert_tokens_to_ids(self._src_lang ) __UpperCAmelCase : List[Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self._src_lang @src_lang.setter def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : int = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def lowerCamelCase__ ( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Any = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] = None ): '''simple docstring''' __UpperCAmelCase : Any = [self.sep_token_id] __UpperCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Any , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) __UpperCAmelCase : Any = src_lang __UpperCAmelCase : Dict = self(__A , add_special_tokens=__A , return_tensors=__A , **__A ) __UpperCAmelCase : List[Any] = self.convert_tokens_to_ids(__A ) __UpperCAmelCase : str = tgt_lang_id return inputs def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Dict = "en_XX" , UpperCamelCase : Tuple = None , UpperCamelCase : Any = "ro_RO" , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : str = src_lang __UpperCAmelCase : str = tgt_lang return super().prepare_seqaseq_batch(__A , __A , **__A ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.convert_tokens_to_ids(__A ) __UpperCAmelCase : Any = [] __UpperCAmelCase : Union[str, Any] = [self.eos_token_id, self.cur_lang_code] __UpperCAmelCase : Dict = self.convert_ids_to_tokens(self.prefix_tokens ) __UpperCAmelCase : Optional[Any] = self.convert_ids_to_tokens(self.suffix_tokens ) __UpperCAmelCase : int = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCamelCase__ ( self : Any , UpperCamelCase : List[str] 
): '''simple docstring''' __UpperCAmelCase : Dict = self.convert_tokens_to_ids(__A ) __UpperCAmelCase : Dict = [] __UpperCAmelCase : str = [self.eos_token_id, self.cur_lang_code] __UpperCAmelCase : str = self.convert_ids_to_tokens(self.prefix_tokens ) __UpperCAmelCase : int = self.convert_ids_to_tokens(self.suffix_tokens ) __UpperCAmelCase : Optional[int] = processors.TemplateProcessing( single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : str = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(__A ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' ) return __UpperCAmelCase : int = os.path.join( __A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ): copyfile(self.vocab_file , __A ) return (out_vocab_file,)
351
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
0
"""simple docstring""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear', 'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed', 'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'quantizer.weight_proj': 'quantizer.weight_proj', 'quantizer.vars': 'quantizer.codevectors', 'project_q': 'project_q', 'final_proj': 'project_hid', 'w2v_encoder.proj': 'ctc_proj', 'mask_emb': 'masked_spec_embed', } UpperCAmelCase : int = [ 'ctc_proj', 'quantizer.weight_proj', 'quantizer.codevectors', 'project_q', 'project_hid', ] def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple ) -> int: '''simple docstring''' for attribute in key.split(""".""" ): __UpperCAmelCase : Optional[int] = getattr(__UpperCAmelCase , __UpperCAmelCase ) if weight_type is not None: __UpperCAmelCase : Union[str, Any] = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape else: __UpperCAmelCase : int = hf_pointer.shape assert hf_shape == value.shape, ( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __UpperCAmelCase : Tuple = value elif weight_type == "weight_g": __UpperCAmelCase : Any = value elif weight_type == "weight_v": __UpperCAmelCase : Dict = value elif weight_type == "bias": __UpperCAmelCase : List[Any] = value else: __UpperCAmelCase : Tuple = value logger.info(f'''{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Dict: '''simple docstring''' __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : Optional[int] = fairseq_model.state_dict() __UpperCAmelCase : List[Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __UpperCAmelCase : Dict = False if "conv_layers" in name: load_conv_layer( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , ) __UpperCAmelCase : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]: __UpperCAmelCase : Optional[int] = True if "*" in mapped_key: __UpperCAmelCase : Optional[int] = name.split(__UpperCAmelCase )[0].split(""".""" )[-2] __UpperCAmelCase : str = mapped_key.replace("""*""" , __UpperCAmelCase ) if "weight_g" in name: __UpperCAmelCase : int = '''weight_g''' elif "weight_v" in name: __UpperCAmelCase : str = '''weight_v''' elif "bias" in name and "relative_attention_bias" not in name: __UpperCAmelCase : Optional[Any] = '''bias''' elif "weight" in name: # TODO: don't match quantizer.weight_proj __UpperCAmelCase : Tuple = '''weight''' else: __UpperCAmelCase : List[str] = None set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) continue if not is_used: unused_weights.append(__UpperCAmelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = full_name.split("""conv_layers.""" )[-1] __UpperCAmelCase : Optional[int] = name.split(""".""" ) __UpperCAmelCase : str = int(items[0] ) __UpperCAmelCase : Union[str, Any] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __UpperCAmelCase : str = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __UpperCAmelCase : str = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." 
) __UpperCAmelCase : Any = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) __UpperCAmelCase : str = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__UpperCAmelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Tuple=None ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Any = torch.load(__UpperCAmelCase ) __UpperCAmelCase : List[Any] = WavLMConfigOrig(checkpoint["""cfg"""] ) __UpperCAmelCase : Any = WavLMOrig(__UpperCAmelCase ) model.load_state_dict(checkpoint["""model"""] ) model.eval() if config_path is not None: __UpperCAmelCase : Dict = WavLMConfig.from_pretrained(__UpperCAmelCase ) else: __UpperCAmelCase : List[Any] = WavLMConfig() __UpperCAmelCase : str = WavLMModel(__UpperCAmelCase ) recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase ) hf_wavlm.save_pretrained(__UpperCAmelCase ) if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') UpperCAmelCase : List[str] = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
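# Post-conversion sketch (editorial addition): the dump written by the script above
# can be reloaded with the standard API. The CLI shape follows the argparse block:
#
#   python <this_script>.py --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-converted
#
# Script filename and paths are placeholders, not taken from the source.
from transformers import WavLMModel

model = WavLMModel.from_pretrained("./wavlm-converted")  # placeholder dump folder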
352
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
0
"""simple docstring""" import unittest from transformers import AutoConfig, AutoTokenizer, BertConfig, TensorType, is_flax_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow if is_flax_available(): import jax from transformers.models.auto.modeling_flax_auto import FlaxAutoModel from transformers.models.bert.modeling_flax_bert import FlaxBertModel from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel @require_flax class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: with self.subTest(__snake_case ): __UpperCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) __UpperCAmelCase : Any = FlaxAutoModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: with self.subTest(__snake_case ): __UpperCAmelCase : Optional[Any] = AutoConfig.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) __UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained(__snake_case ) self.assertIsNotNone(__snake_case ) self.assertIsInstance(__snake_case , __snake_case ) @slow def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for model_name in ["bert-base-cased", "bert-large-uncased"]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__snake_case ) __UpperCAmelCase : List[Any] = FlaxBertModel.from_pretrained(__snake_case ) __UpperCAmelCase : Optional[int] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX ) @jax.jit def eval(**UpperCamelCase : List[Any] ): return model(**__snake_case ) eval(**__snake_case ).block_until_ready() @slow def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' for model_name in ["roberta-base", "roberta-large"]: __UpperCAmelCase : List[Any] = AutoTokenizer.from_pretrained(__snake_case ) __UpperCAmelCase : Tuple = FlaxRobertaModel.from_pretrained(__snake_case ) __UpperCAmelCase : Optional[int] = tokenizer("""Do you support jax jitted function?""" , return_tensors=TensorType.JAX ) @jax.jit def eval(**UpperCamelCase : List[str] ): return model(**__snake_case ) eval(**__snake_case ).block_until_ready() def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex( __snake_case , """bert-base is not a local folder and is not a valid model identifier""" ): __UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained("""bert-base""" ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' with self.assertRaisesRegex( __snake_case , R"""aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)""" ): __UpperCAmelCase : List[Any] = FlaxAutoModel.from_pretrained(__snake_case , revision="""aaaaaa""" ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' with self.assertRaisesRegex( __snake_case , """hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack""" , ): __UpperCAmelCase : Optional[int] = FlaxAutoModel.from_pretrained("""hf-internal-testing/config-no-model""" ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' with 
self.assertRaisesRegex(__snake_case , """Use `from_pt=True` to load this model""" ): __UpperCAmelCase : str = FlaxAutoModel.from_pretrained("""hf-internal-testing/tiny-bert-pt-only""" )
353
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" import fire from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoTokenizer from utils import SeqaSeqDataset, pickle_save def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int]=1_0_2_4 , _UpperCamelCase : Union[str, Any]=1_0_2_4 , _UpperCamelCase : Any=False , **_UpperCamelCase : List[Any] ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained(lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = SeqaSeqDataset(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , type_path="""train""" , **lowerCamelCase__ ) __UpperCAmelCase : List[str] = tok.pad_token_id def get_lens(_UpperCamelCase : str ): __UpperCAmelCase : int = tqdm( DataLoader(lowerCamelCase__ , batch_size=5_1_2 , num_workers=8 , shuffle=lowerCamelCase__ , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , ) __UpperCAmelCase : str = [] for batch in dl: __UpperCAmelCase : str = batch['''input_ids'''].ne(lowerCamelCase__ ).sum(1 ).tolist() __UpperCAmelCase : str = batch['''labels'''].ne(lowerCamelCase__ ).sum(1 ).tolist() if consider_target: for src, tgt in zip(lowerCamelCase__ , lowerCamelCase__ ): max_lens.append(max(lowerCamelCase__ , lowerCamelCase__ ) ) else: max_lens.extend(lowerCamelCase__ ) return max_lens __UpperCAmelCase : Any = get_lens(lowerCamelCase__ ) __UpperCAmelCase : Optional[Any] = SeqaSeqDataset(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , type_path="""val""" , **lowerCamelCase__ ) __UpperCAmelCase : Any = get_lens(lowerCamelCase__ ) pickle_save(lowerCamelCase__ , train_ds.len_file ) pickle_save(lowerCamelCase__ , val_ds.len_file ) if __name__ == "__main__": fire.Fire(save_len_file)
354
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" import inspect import unittest from transformers import ConvNextVaConfig from transformers.models.auto import get_values from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : int , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any]=13 , UpperCamelCase : Optional[int]=32 , UpperCamelCase : int=3 , UpperCamelCase : List[str]=4 , UpperCamelCase : Optional[int]=[10, 20, 30, 40] , UpperCamelCase : Tuple=[2, 2, 3, 2] , UpperCamelCase : Dict=True , UpperCamelCase : List[str]=True , UpperCamelCase : Optional[Any]=37 , UpperCamelCase : Union[str, Any]="gelu" , UpperCamelCase : Any=10 , UpperCamelCase : str=0.02 , UpperCamelCase : str=["stage2", "stage3", "stage4"] , UpperCamelCase : List[str]=[2, 3, 4] , UpperCamelCase : List[Any]=None , ): '''simple docstring''' __UpperCAmelCase : Dict = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : str = image_size __UpperCAmelCase : Tuple = num_channels __UpperCAmelCase : List[str] = num_stages __UpperCAmelCase : Optional[Any] = hidden_sizes __UpperCAmelCase : str = depths __UpperCAmelCase : int = is_training __UpperCAmelCase : Optional[int] = use_labels __UpperCAmelCase : Tuple = intermediate_size __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Union[str, Any] = num_labels __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : Union[str, Any] = out_features __UpperCAmelCase : List[str] = out_indices __UpperCAmelCase : Union[str, Any] = scope def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __UpperCAmelCase : Dict = None if self.use_labels: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return ConvNextVaConfig( num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Any = ConvNextVaModel(config=_a ) model.to(_a ) model.eval() __UpperCAmelCase : Union[str, Any] = model(_a ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, 
self.image_size // 32) , ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = ConvNextVaForImageClassification(_a ) model.to(_a ) model.eval() __UpperCAmelCase : Optional[Any] = model(_a , labels=_a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : int = ConvNextVaBackbone(config=_a ) model.to(_a ) model.eval() __UpperCAmelCase : Any = model(_a ) # verify hidden states self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __UpperCAmelCase : str = None __UpperCAmelCase : Optional[Any] = ConvNextVaBackbone(config=_a ) model.to(_a ) model.eval() __UpperCAmelCase : Dict = model(_a ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Dict = self.prepare_config_and_inputs() __UpperCAmelCase : Optional[Any] = config_and_inputs __UpperCAmelCase : List[str] = {'pixel_values': pixel_values} return config, inputs_dict def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Dict = self.prepare_config_and_inputs() __UpperCAmelCase : Tuple = config_and_inputs __UpperCAmelCase : Union[str, Any] = {'pixel_values': pixel_values, 'labels': labels} return config, inputs_dict @require_torch class lowerCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ): """simple docstring""" __a = ( ( ConvNextVaModel, ConvNextVaForImageClassification, ConvNextVaBackbone, ) if is_torch_available() else () ) __a = ( {'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification} if is_torch_available() else {} ) __a = False __a = False __a = False __a = False __a = False def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = ConvNextVaModelTester(self ) __UpperCAmelCase : Dict = ConfigTester(self , config_class=_a , has_text_modality=_a , hidden_size=37 ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return @unittest.skip(reason="""ConvNextV2 does not use inputs_embeds""" ) def lowerCamelCase__ ( self : int ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not support input and output embeddings""" ) def 
lowerCamelCase__ ( self : Dict ): '''simple docstring''' pass @unittest.skip(reason="""ConvNextV2 does not use feedforward chunking""" ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase__ ( self : str ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : List[str] = True if model_class.__name__ in [ *get_values(_a ), *get_values(_a ), ]: continue __UpperCAmelCase : Union[str, Any] = model_class(_a ) model.to(_a ) model.train() __UpperCAmelCase : Any = self._prepare_for_class(_a , _a , return_labels=_a ) __UpperCAmelCase : Optional[int] = model(**_a ).loss loss.backward() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' if not self.model_tester.is_training: return for model_class in self.all_model_classes: __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels() __UpperCAmelCase : Tuple = False __UpperCAmelCase : int = True if ( model_class.__name__ in [*get_values(_a ), *get_values(_a )] or not model_class.supports_gradient_checkpointing ): continue __UpperCAmelCase : List[Any] = model_class(_a ) model.to(_a ) model.gradient_checkpointing_enable() model.train() __UpperCAmelCase : Any = self._prepare_for_class(_a , _a , return_labels=_a ) __UpperCAmelCase : str = model(**_a ).loss loss.backward() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Optional[int] = model_class(_a ) __UpperCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : Optional[Any] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _a ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_a ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' def check_hidden_states_output(UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : List[Any] ): __UpperCAmelCase : Any = model_class(_a ) model.to(_a ) model.eval() with torch.no_grad(): __UpperCAmelCase : int = model(**self._prepare_for_class(_a , _a ) ) __UpperCAmelCase : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __UpperCAmelCase : Tuple = self.model_tester.num_stages self.assertEqual(len(_a ) , expected_num_stages + 1 ) # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Any = True check_hidden_states_output(_a , _a , _a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Any = True check_hidden_states_output(_a , _a , _a ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_a ) 
@slow def lowerCamelCase__ ( self : int ): '''simple docstring''' for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Optional[int] = ConvNextVaModel.from_pretrained(_a ) self.assertIsNotNone(_a ) def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ) if is_vision_available() else None @slow def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : str = ConvNextVaForImageClassification.from_pretrained("""facebook/convnextv2-tiny-1k-224""" ).to(_a ) __UpperCAmelCase : Any = self.default_image_processor __UpperCAmelCase : Dict = prepare_img() __UpperCAmelCase : Tuple = preprocessor(images=_a , return_tensors="""pt""" ).to(_a ) # forward pass with torch.no_grad(): __UpperCAmelCase : str = model(**_a ) # verify the logits __UpperCAmelCase : int = torch.Size((1, 1_000) ) self.assertEqual(outputs.logits.shape , _a ) __UpperCAmelCase : int = torch.tensor([0.9996, 0.1966, -0.4386] ).to(_a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _a , atol=1e-4 ) )
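# Inference sketch (editorial addition), mirroring the integration test above. Note
# that this dump renames digits to letters, so `ConvNextVaForImageClassification`
# in the snippet corresponds to the library's ConvNextV2ForImageClassification.
import torch
from PIL import Image
from transformers import AutoImageProcessor, ConvNextV2ForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
with torch.no_grad():
    logits = model(**processor(images=image, return_tensors="pt")).logits  # shape (1, 1000)
print(model.config.id2label[int(logits.argmax(-1))])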
355
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
0
"""simple docstring""" import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) -> Any: '''simple docstring''' __UpperCAmelCase : List[str] = multiprocessing.Manager() __UpperCAmelCase : Tuple = manager.list() __UpperCAmelCase : Optional[int] = multiprocessing.Process(target=__snake_case , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> int: '''simple docstring''' with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil __UpperCAmelCase : Tuple = shutil.rmtree __UpperCAmelCase : Any = os.rmdir __UpperCAmelCase : int = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: __UpperCAmelCase : Optional[int] = {} with swallow_io(): with time_limit(__snake_case ): exec(__snake_case , __snake_case ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(f'''failed: {e}''' ) # Needed for cleaning up. __UpperCAmelCase : List[str] = rmtree __UpperCAmelCase : str = rmdir __UpperCAmelCase : Dict = chdir @contextlib.contextmanager def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> List[str]: '''simple docstring''' def signal_handler(_UpperCamelCase : str , _UpperCamelCase : int ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __snake_case ) signal.signal(signal.SIGALRM , __snake_case ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : int = WriteOnlyStringIO() with contextlib.redirect_stdout(__snake_case ): with contextlib.redirect_stderr(__snake_case ): with redirect_stdin(__snake_case ): yield @contextlib.contextmanager def lowerCamelCase ( ) -> Dict: '''simple docstring''' with tempfile.TemporaryDirectory() as dirname: with chdir(__snake_case ): yield dirname class lowerCamelCase__ ( lowerCamelCase__ ): """simple docstring""" pass class lowerCamelCase__ ( io.StringIO ): """simple docstring""" def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Tuple , **UpperCamelCase : int ): '''simple docstring''' raise OSError def lowerCamelCase__ ( self : Optional[int] , *UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[int] ): '''simple docstring''' raise OSError def lowerCamelCase__ ( self : Union[str, Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : List[Any] ): '''simple docstring''' raise OSError def lowerCamelCase__ ( self : int , *UpperCamelCase : List[str] , **UpperCamelCase : List[str] ): '''simple docstring''' return False class lowerCamelCase__ ( contextlib._RedirectStream ): # type: ignore """simple docstring""" __a = 'stdin' @contextlib.contextmanager def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> int: '''simple docstring''' if root == ".": yield return __UpperCAmelCase : Optional[Any] = os.getcwd() os.chdir(__snake_case ) try: yield except BaseException as exc: raise exc finally: 
os.chdir(__snake_case ) def lowerCamelCase ( _UpperCamelCase : Optional[Any]=None ) -> Tuple: '''simple docstring''' if maximum_memory_bytes is not None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : List[str] = None import os __UpperCAmelCase : int = """1""" __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Any = None __UpperCAmelCase : List[str] = None __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : str = None __UpperCAmelCase : List[str] = None __UpperCAmelCase : Optional[Any] = None __UpperCAmelCase : str = None __UpperCAmelCase : int = None __UpperCAmelCase : List[str] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : int = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : int = None __UpperCAmelCase : Any = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : str = None __UpperCAmelCase : Dict = None __UpperCAmelCase : List[str] = None import shutil __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : List[Any] = None import subprocess __UpperCAmelCase : int = None # type: ignore __UpperCAmelCase : int = None import sys __UpperCAmelCase : List[str] = None __UpperCAmelCase : int = None __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Optional[int] = None __UpperCAmelCase : Any = None
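# Editorial sketch: a self-contained version of the SIGALRM timeout guard this file
# builds (its call sites name it `time_limit`). Assumption: behaviorally equivalent
# to the mangled original; like the original it is Unix-only.
import contextlib
import signal


@contextlib.contextmanager
def time_limit_sketch(seconds: float):
    def handler(signum, frame):
        raise TimeoutError("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)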
356
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...file_utils import TensorType, is_torch_available from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : str = { "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json", # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small } class lowerCamelCase__ ( A ): """simple docstring""" __a = '''blenderbot-small''' __a = ['''past_key_values'''] __a = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''} def __init__( self : List[str] , UpperCamelCase : List[str]=50_265 , UpperCamelCase : Optional[Any]=512 , UpperCamelCase : Optional[Any]=8 , UpperCamelCase : str=2_048 , UpperCamelCase : Tuple=16 , UpperCamelCase : int=8 , UpperCamelCase : int=2_048 , UpperCamelCase : Dict=16 , UpperCamelCase : Any=0.0 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Tuple=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : int="gelu" , UpperCamelCase : List[str]=512 , UpperCamelCase : int=0.1 , UpperCamelCase : str=0.0 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Dict=False , UpperCamelCase : Tuple=0 , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Dict=2 , UpperCamelCase : Union[str, Any]=2 , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : List[str] = vocab_size __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : Any = d_model __UpperCAmelCase : str = encoder_ffn_dim __UpperCAmelCase : Any = encoder_layers __UpperCAmelCase : Union[str, Any] = encoder_attention_heads __UpperCAmelCase : Optional[int] = decoder_ffn_dim __UpperCAmelCase : Tuple = decoder_layers __UpperCAmelCase : Union[str, Any] = decoder_attention_heads __UpperCAmelCase : Dict = dropout __UpperCAmelCase : Optional[Any] = attention_dropout __UpperCAmelCase : int = activation_dropout __UpperCAmelCase : Optional[int] = activation_function __UpperCAmelCase : Optional[int] = init_std __UpperCAmelCase : str = encoder_layerdrop __UpperCAmelCase : Tuple = decoder_layerdrop __UpperCAmelCase : Optional[int] = use_cache __UpperCAmelCase : Dict = encoder_layers __UpperCAmelCase : Union[str, Any] = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , is_encoder_decoder=_snake_case , decoder_start_token_id=_snake_case , forced_eos_token_id=_snake_case , **_snake_case , ) class lowerCamelCase__ ( A ): """simple docstring""" @property def lowerCamelCase__ ( self : int ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : List[Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __UpperCAmelCase : Dict = {0: """batch"""} __UpperCAmelCase : List[str] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} else: __UpperCAmelCase : int = {0: """batch""", 1: """decoder_sequence"""} __UpperCAmelCase : Optional[int] = {0: """batch""", 1: 
"""decoder_sequence"""} if self.use_past: self.fill_with_past_key_values_(_snake_case , direction="""inputs""" ) elif self.task == "causal-lm": # TODO: figure this case out. __UpperCAmelCase : List[str] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ] ) if self.use_past: __UpperCAmelCase ,__UpperCAmelCase : Dict = self.num_layers for i in range(_snake_case ): __UpperCAmelCase : List[Any] = {0: """batch""", 2: """past_sequence + sequence"""} __UpperCAmelCase : int = {0: """batch""", 2: """past_sequence + sequence"""} else: __UpperCAmelCase : Union[str, Any] = OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """encoder_sequence"""}), ("""attention_mask""", {0: """batch""", 1: """encoder_sequence"""}), ("""decoder_input_ids""", {0: """batch""", 1: """decoder_sequence"""}), ("""decoder_attention_mask""", {0: """batch""", 1: """decoder_sequence"""}), ] ) return common_inputs @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : str = super().outputs else: __UpperCAmelCase : int = super(_snake_case , self ).outputs if self.use_past: __UpperCAmelCase ,__UpperCAmelCase : Any = self.num_layers for i in range(_snake_case ): __UpperCAmelCase : Optional[int] = {0: """batch""", 2: """past_sequence + sequence"""} __UpperCAmelCase : Union[str, Any] = {0: """batch""", 2: """past_sequence + sequence"""} return common_outputs def lowerCamelCase__ ( self : Any , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' __UpperCAmelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) # Generate decoder inputs __UpperCAmelCase : Tuple = seq_length if not self.use_past else 1 __UpperCAmelCase : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) __UpperCAmelCase : Dict = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()} __UpperCAmelCase : List[Any] = dict(**_snake_case , **_snake_case ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCAmelCase ,__UpperCAmelCase : int = common_inputs["""input_ids"""].shape __UpperCAmelCase : Union[str, Any] = common_inputs["""decoder_input_ids"""].shape[1] __UpperCAmelCase ,__UpperCAmelCase : List[str] = self.num_attention_heads __UpperCAmelCase : List[str] = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCAmelCase : List[Any] = decoder_seq_length + 3 __UpperCAmelCase : Optional[Any] = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) __UpperCAmelCase : Any = torch.cat( [common_inputs["""decoder_attention_mask"""], torch.ones(_snake_case , _snake_case )] , dim=1 ) __UpperCAmelCase : Union[str, Any] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.num_layers __UpperCAmelCase : Dict = min(_snake_case , _snake_case ) __UpperCAmelCase : Union[str, Any] 
= max(_snake_case , _snake_case ) - min_num_layers __UpperCAmelCase : str = """encoder""" if num_encoder_layers > num_decoder_layers else """decoder""" for _ in range(_snake_case ): common_inputs["past_key_values"].append( ( torch.zeros(_snake_case ), torch.zeros(_snake_case ), torch.zeros(_snake_case ), torch.zeros(_snake_case ), ) ) # TODO: test this. __UpperCAmelCase : Dict = encoder_shape if remaining_side_name == """encoder""" else decoder_shape for _ in range(_snake_case , _snake_case ): common_inputs["past_key_values"].append((torch.zeros(_snake_case ), torch.zeros(_snake_case )) ) return common_inputs def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' __UpperCAmelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ) if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCAmelCase : List[str] = seqlen + 2 __UpperCAmelCase ,__UpperCAmelCase : str = self.num_layers __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.num_attention_heads __UpperCAmelCase : Union[str, Any] = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) __UpperCAmelCase : Union[str, Any] = common_inputs["""attention_mask"""].dtype __UpperCAmelCase : Optional[Any] = torch.cat( [common_inputs["""attention_mask"""], torch.ones(_snake_case , _snake_case , dtype=_snake_case )] , dim=1 ) __UpperCAmelCase : int = [ (torch.zeros(_snake_case ), torch.zeros(_snake_case )) for _ in range(_snake_case ) ] return common_inputs def lowerCamelCase__ ( self : Any , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = compute_effective_axis_dimension( _snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __UpperCAmelCase : List[Any] = tokenizer.num_special_tokens_to_add(_snake_case ) __UpperCAmelCase : str = compute_effective_axis_dimension( _snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_snake_case ) # Generate dummy inputs according to compute batch and sequence __UpperCAmelCase : Any = [""" """.join([tokenizer.unk_token] ) * seq_length] * batch_size __UpperCAmelCase : Dict = dict(tokenizer(_snake_case , return_tensors=_snake_case ) ) return common_inputs def lowerCamelCase__ ( self : Dict , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case ) elif self.task == "causal-lm": __UpperCAmelCase : 
Optional[Any] = self._generate_dummy_inputs_for_causal_lm( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case ) else: __UpperCAmelCase : Optional[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( _snake_case , batch_size=_snake_case , seq_length=_snake_case , is_pair=_snake_case , framework=_snake_case ) return common_inputs def lowerCamelCase__ ( self : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] ): '''simple docstring''' if self.task in ["default", "seq2seq-lm"]: __UpperCAmelCase : Union[str, Any] = super()._flatten_past_key_values_(_snake_case , _snake_case , _snake_case , _snake_case ) else: __UpperCAmelCase : Optional[int] = super(_snake_case , self )._flatten_past_key_values_( _snake_case , _snake_case , _snake_case , _snake_case )
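# A minimal sketch (not part of the original config class) of the
# past_key_values dummy tensors built above for ONNX export; `batch`,
# `num_heads`, `seq_len`, `hidden_size` and the layer count are illustrative
# assumptions, not values from the file.
import torch

batch, num_heads, seq_len, hidden_size, num_layers = 2, 16, 8, 1_024, 6
head_dim = hidden_size // num_heads
past_shape = (batch, num_heads, seq_len, head_dim)
# In the seq2seq branch each layer holds four tensors: self-attention key and
# value plus cross-attention key and value.
past_key_values = [tuple(torch.zeros(past_shape) for _ in range(4)) for _ in range(num_layers)]
assert past_key_values[0][0].shape == (2, 16, 8, 64)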
357
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
0
"""simple docstring""" import inspect import os import unittest from dataclasses import dataclass import torch from accelerate import Accelerator, DistributedDataParallelKwargs, GradScalerKwargs from accelerate.state import AcceleratorState from accelerate.test_utils import execute_subprocess_async, require_cuda, require_multi_gpu from accelerate.utils import KwargsHandler @dataclass class lowerCamelCase__ ( lowercase__ ): """simple docstring""" __a = 0 __a = False __a = 3.0 class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.assertDictEqual(MockClass().to_kwargs() , {} ) self.assertDictEqual(MockClass(a=2 ).to_kwargs() , {"""a""": 2} ) self.assertDictEqual(MockClass(a=2 , b=_UpperCamelCase ).to_kwargs() , {"""a""": 2, """b""": True} ) self.assertDictEqual(MockClass(a=2 , c=2.25 ).to_kwargs() , {"""a""": 2, """c""": 2.25} ) @require_cuda def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = GradScalerKwargs(init_scale=1_024 , growth_factor=2 ) AcceleratorState._reset_state() __UpperCAmelCase : Union[str, Any] = Accelerator(mixed_precision="""fp16""" , kwargs_handlers=[scaler_handler] ) print(accelerator.use_fpaa ) __UpperCAmelCase : Optional[Any] = accelerator.scaler # Check the kwargs have been applied self.assertEqual(scaler._init_scale , 1_024.0 ) self.assertEqual(scaler._growth_factor , 2.0 ) # Check the other values are at the default self.assertEqual(scaler._backoff_factor , 0.5 ) self.assertEqual(scaler._growth_interval , 2_000 ) self.assertEqual(scaler._enabled , _UpperCamelCase ) @require_multi_gpu def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Dict = ["""torchrun""", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )] execute_subprocess_async(_UpperCamelCase , env=os.environ.copy() ) if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = DistributedDataParallelKwargs(bucket_cap_mb=15, find_unused_parameters=True) UpperCAmelCase : List[str] = Accelerator(kwargs_handlers=[ddp_scaler]) UpperCAmelCase : List[str] = torch.nn.Linear(100, 200) UpperCAmelCase : str = accelerator.prepare(model) # Check the values changed in kwargs UpperCAmelCase : str = '' UpperCAmelCase : List[Any] = model.bucket_bytes_cap // (1024 * 1024) if observed_bucket_cap_map != 15: error_msg += F"Kwargs badly passed, should have `15` but found {observed_bucket_cap_map}.\n" if model.find_unused_parameters is not True: error_msg += F"Kwargs badly passed, should have `True` but found {model.find_unused_parameters}.\n" # Check the values of the defaults if model.dim != 0: error_msg += F"Default value not respected, should have `0` but found {model.dim}.\n" if model.broadcast_buffers is not True: error_msg += F"Default value not respected, should have `True` but found {model.broadcast_buffers}.\n" if model.gradient_as_bucket_view is not False: error_msg += F"Default value not respected, should have `False` but found {model.gradient_as_bucket_view}.\n" # Raise error at the end to make sure we don't stop at the first failure. if len(error_msg) > 0: raise ValueError(error_msg)
358
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
0
"""simple docstring""" import argparse import json import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from torchvision import transforms from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling def lowerCamelCase ( _UpperCamelCase : Any ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : int = [2, 2, 6, 2] if """tiny""" in model_name else [2, 2, 1_8, 2] __UpperCAmelCase : Any = True if """large""" in model_name or """huge""" in model_name else False __UpperCAmelCase : List[Any] = True if """large""" in model_name or """huge""" in model_name else False __UpperCAmelCase : str = True if """large""" in model_name or """huge""" in model_name else False if "large" in model_name or "xlarge" in model_name or "huge" in model_name: if "fl3" in model_name: __UpperCAmelCase : Optional[int] = [3, 3, 3, 3] __UpperCAmelCase : List[str] = [5, 5, 5, 5] elif "fl4" in model_name: __UpperCAmelCase : List[str] = [4, 4, 4, 4] __UpperCAmelCase : str = [3, 3, 3, 3] if "tiny" in model_name or "small" in model_name or "base" in model_name: __UpperCAmelCase : Optional[Any] = [3, 3, 3, 3] if "lrf" in model_name: __UpperCAmelCase : Optional[int] = [3, 3, 3, 3] else: __UpperCAmelCase : Dict = [2, 2, 2, 2] if "tiny" in model_name: __UpperCAmelCase : Union[str, Any] = 9_6 elif "small" in model_name: __UpperCAmelCase : str = 9_6 elif "base" in model_name: __UpperCAmelCase : Any = 1_2_8 elif "large" in model_name: __UpperCAmelCase : Union[str, Any] = 1_9_2 elif "xlarge" in model_name: __UpperCAmelCase : int = 2_5_6 elif "huge" in model_name: __UpperCAmelCase : Dict = 3_5_2 # set label information __UpperCAmelCase : str = """huggingface/label-files""" if "large" in model_name or "huge" in model_name: __UpperCAmelCase : Dict = """imagenet-22k-id2label.json""" else: __UpperCAmelCase : Any = """imagenet-1k-id2label.json""" __UpperCAmelCase : List[str] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : List[str] = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Any = {v: k for k, v in idalabel.items()} __UpperCAmelCase : Union[str, Any] = FocalNetConfig( embed_dim=_UpperCamelCase , depths=_UpperCamelCase , focal_levels=_UpperCamelCase , focal_windows=_UpperCamelCase , use_conv_embed=_UpperCamelCase , idalabel=_UpperCamelCase , labelaid=_UpperCamelCase , use_post_layernorm=_UpperCamelCase , use_layerscale=_UpperCamelCase , ) return config def lowerCamelCase ( _UpperCamelCase : Any ) -> List[Any]: '''simple docstring''' if "patch_embed.proj" in name: __UpperCAmelCase : Tuple = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __UpperCAmelCase : Optional[int] = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if "layers" in name: __UpperCAmelCase : Union[str, Any] = """encoder.""" + name if "encoder.layers" in name: __UpperCAmelCase : List[Any] = name.replace("""encoder.layers""" , """encoder.stages""" ) if "downsample.proj" in name: __UpperCAmelCase : Dict = name.replace("""downsample.proj""" , """downsample.projection""" ) if "blocks" in name: __UpperCAmelCase : Dict = name.replace("""blocks""" , """layers""" ) if "modulation.f.weight" in name or "modulation.f.bias" in name: __UpperCAmelCase : Optional[int] = name.replace("""modulation.f""" , 
"""modulation.projection_in""" ) if "modulation.h.weight" in name or "modulation.h.bias" in name: __UpperCAmelCase : int = name.replace("""modulation.h""" , """modulation.projection_context""" ) if "modulation.proj.weight" in name or "modulation.proj.bias" in name: __UpperCAmelCase : List[Any] = name.replace("""modulation.proj""" , """modulation.projection_out""" ) if name == "norm.weight": __UpperCAmelCase : Optional[Any] = """layernorm.weight""" if name == "norm.bias": __UpperCAmelCase : Dict = """layernorm.bias""" if "head" in name: __UpperCAmelCase : str = name.replace("""head""" , """classifier""" ) else: __UpperCAmelCase : Optional[Any] = """focalnet.""" + name return name def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Any , _UpperCamelCase : int=False ) -> str: '''simple docstring''' __UpperCAmelCase : str = { """focalnet-tiny""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth""", """focalnet-tiny-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth""", """focalnet-small""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth""", """focalnet-small-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth""", """focalnet-base""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth""", """focalnet-base-lrf""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth""", """focalnet-large-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth""", """focalnet-large-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth""", """focalnet-xlarge-lrf-fl3""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth""", """focalnet-xlarge-lrf-fl4""": """https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth""", } # fmt: on __UpperCAmelCase : str = model_name_to_url[model_name] print("""Checkpoint URL: """ , _UpperCamelCase ) __UpperCAmelCase : Dict = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="""cpu""" )["""model"""] # rename keys for key in state_dict.copy().keys(): __UpperCAmelCase : int = state_dict.pop(_UpperCamelCase ) __UpperCAmelCase : Tuple = val __UpperCAmelCase : Tuple = get_focalnet_config(_UpperCamelCase ) __UpperCAmelCase : List[Any] = FocalNetForImageClassification(_UpperCamelCase ) model.eval() # load state dict model.load_state_dict(_UpperCamelCase ) # verify conversion __UpperCAmelCase : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Tuple = BitImageProcessor( do_resize=_UpperCamelCase , size={"""shortest_edge""": 2_5_6} , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCamelCase , crop_size=2_2_4 , do_normalize=_UpperCamelCase , image_mean=_UpperCamelCase , image_std=_UpperCamelCase , ) __UpperCAmelCase : List[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) __UpperCAmelCase : int = processor(images=_UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Union[str, Any] = transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406] , 
std=[0.229, 0.224, 0.225] ), ] ) __UpperCAmelCase : List[str] = image_transforms(_UpperCamelCase ).unsqueeze(0 ) # verify pixel_values assert torch.allclose(inputs.pixel_values , _UpperCamelCase , atol=1E-4 ) __UpperCAmelCase : int = model(**_UpperCamelCase ) __UpperCAmelCase : Tuple = outputs.logits.argmax(-1 ).item() print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] ) print("""First values of logits:""" , outputs.logits[0, :3] ) if model_name == "focalnet-tiny": __UpperCAmelCase : Tuple = torch.tensor([0.2_166, -0.4_368, 0.2_191] ) elif model_name == "focalnet-tiny-lrf": __UpperCAmelCase : Optional[int] = torch.tensor([1.1_669, 0.0_125, -0.1_695] ) elif model_name == "focalnet-small": __UpperCAmelCase : Tuple = torch.tensor([0.4_917, -0.0_430, 0.1_341] ) elif model_name == "focalnet-small-lrf": __UpperCAmelCase : Any = torch.tensor([-0.2_588, -0.5_342, -0.2_331] ) elif model_name == "focalnet-base": __UpperCAmelCase : int = torch.tensor([-0.1_655, -0.4_090, -0.1_730] ) elif model_name == "focalnet-base-lrf": __UpperCAmelCase : List[str] = torch.tensor([0.5_306, -0.0_483, -0.3_928] ) assert torch.allclose(outputs.logits[0, :3] , _UpperCamelCase , atol=1E-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCamelCase ) processor.save_pretrained(_UpperCamelCase ) if push_to_hub: print(f'''Pushing model and processor of {model_name} to the hub...''' ) model.push_to_hub(f'''{model_name}''' ) processor.push_to_hub(f'''{model_name}''' ) if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='focalnet-tiny', type=str, help='Name of the FocalNet model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub.', ) UpperCAmelCase : Optional[int] = parser.parse_args() convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
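# Sketch of the checkpoint-key renaming loop used in the conversion above,
# applied to a toy state dict; this stand-in covers a single rename rule
# rather than the full rename function defined earlier.
def rename_key_sketch(name: str) -> str:
    return name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")


state_dict = {"patch_embed.proj.weight": 0, "patch_embed.proj.bias": 1}
state_dict = {rename_key_sketch(k): v for k, v in state_dict.items()}
assert "embeddings.patch_embeddings.projection.weight" in state_dict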
359
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
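# Hedged sketch of the padding collate above: variable-length token sequences
# are written into a zero tensor, and a parallel attention mask marks the
# positions that hold real tokens.
import torch


def pad_collate(seqs: list) -> tuple:
    lengths = [len(s) for s in seqs]
    text = torch.zeros(len(seqs), max(lengths), dtype=torch.long)
    mask = torch.zeros(len(seqs), max(lengths), dtype=torch.long)
    for i, (seq, n) in enumerate(zip(seqs, lengths)):
        text[i, :n] = torch.tensor(seq)
        mask[i, :n] = 1
    return text, mask


text, mask = pad_collate([[5, 6, 7], [8, 9]])
assert mask.tolist() == [[1, 1, 1], [1, 1, 0]]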
320
0
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCamelCase__ ( A_ , unittest.TestCase ): """simple docstring""" __a = BioGptTokenizer __a = False def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt __UpperCAmelCase : Dict = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] __UpperCAmelCase : int = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) __UpperCAmelCase : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] __UpperCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , """w""" ) as fp: fp.write("""\n""".join(snake_case__ ) ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : str = "lower newer" __UpperCAmelCase : List[str] = "lower newer" return input_text, output_text def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file ) __UpperCAmelCase : List[str] = "lower" __UpperCAmelCase : Optional[int] = ["low", "er</w>"] __UpperCAmelCase : List[Any] = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) __UpperCAmelCase : str = tokens + ["<unk>"] __UpperCAmelCase : Dict = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" ) __UpperCAmelCase : Optional[Any] = tokenizer.encode("""sequence builders""" , add_special_tokens=snake_case__ ) __UpperCAmelCase : List[Any] = tokenizer.encode("""multi-sequence build""" , add_special_tokens=snake_case__ ) __UpperCAmelCase : Tuple = tokenizer.build_inputs_with_special_tokens(snake_case__ ) __UpperCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
360
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
0
"""simple docstring""" import socket def lowerCamelCase ( ) -> Dict: '''simple docstring''' __UpperCAmelCase : List[str] = socket.socket(socket.AF_INET , socket.SOCK_STREAM ) __UpperCAmelCase : str = socket.gethostname() __UpperCAmelCase : Union[str, Any] = 1_2_3_1_2 sock.connect((host, port) ) sock.send(b"""Hello server!""" ) with open("""Received_file""" , """wb""" ) as out_file: print("""File opened""" ) print("""Receiving data...""" ) while True: __UpperCAmelCase : Any = sock.recv(1_0_2_4 ) if not data: break out_file.write(lowerCAmelCase__ ) print("""Successfully received the file""" ) sock.close() print("""Connection closed""" ) if __name__ == "__main__": main()
361
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
0
"""simple docstring""" from scipy.stats import pearsonr, spearmanr from sklearn.metrics import fa_score, matthews_corrcoef import datasets UpperCAmelCase : Optional[int] = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n' UpperCAmelCase : Tuple = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n' UpperCAmelCase : List[str] = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"pearson\": Pearson Correlation\n \"spearmanr\": Spearman Correlation\n \"matthews_correlation\": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of [\"mnli\", \"mnli_mismatched\", \"mnli_matched\", \"qnli\", \"rte\", \"wnli\", \"hans\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({\"pearson\": round(results[\"pearson\"], 2), \"spearmanr\": round(results[\"spearmanr\"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n' def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Tuple ) -> Any: '''simple docstring''' return float((preds == labels).mean() ) def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Any ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = simple_accuracy(lowercase__ , lowercase__ ) __UpperCAmelCase : Dict = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ ) ) return { "accuracy": acc, "f1": fa, } def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Any ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Optional[int] = float(pearsonr(lowercase__ , lowercase__ )[0] ) __UpperCAmelCase : int = float(spearmanr(lowercase__ , lowercase__ )[0] ) return { "pearson": pearson_corr, "spearmanr": spearman_corr, } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : List[str] ): '''simple 
docstring''' if self.config_name not in [ "sst2", "mnli", "mnli_mismatched", "mnli_matched", "cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans", ]: raise KeyError( """You should supply a configuration name selected in """ """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """ """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ), """references""": datasets.Value("""int64""" if self.config_name != """stsb""" else """float32""" ), } ) , codebase_urls=[] , reference_urls=[] , format="""numpy""" , ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' if self.config_name == "cola": return {"matthews_correlation": matthews_corrcoef(UpperCamelCase , UpperCamelCase )} elif self.config_name == "stsb": return pearson_and_spearman(UpperCamelCase , UpperCamelCase ) elif self.config_name in ["mrpc", "qqp"]: return acc_and_fa(UpperCamelCase , UpperCamelCase ) elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]: return {"accuracy": simple_accuracy(UpperCamelCase , UpperCamelCase )} else: raise KeyError( """You should supply a configuration name selected in """ """[\"sst2\", \"mnli\", \"mnli_mismatched\", \"mnli_matched\", """ """\"cola\", \"stsb\", \"mrpc\", \"qqp\", \"qnli\", \"rte\", \"wnli\", \"hans\"]""" )
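# Quick numeric check of the accuracy/F1 helpers above, using plain
# numpy/scikit-learn rather than the datasets.Metric wrapper.
import numpy as np
from sklearn.metrics import f1_score

preds = np.array([0, 1, 1, 0])
labels = np.array([0, 1, 0, 0])
acc = float((preds == labels).mean())              # 0.75
f1 = float(f1_score(y_true=labels, y_pred=preds))  # 1 TP, 1 FP, 0 FN -> 2/3
assert abs(acc - 0.75) < 1e-9 and abs(f1 - 2 / 3) < 1e-9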
362
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
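# Sketch of the idea behind the shortest-edge resize used in `resize` above:
# the short side is scaled to `shortest_edge` and the long side keeps the
# aspect ratio (the library may round the long side slightly differently).
def shortest_edge_size(height: int, width: int, shortest_edge: int = 224) -> tuple:
    scale = shortest_edge / min(height, width)
    return round(height * scale), round(width * scale)


assert shortest_edge_size(480, 640) == (224, 299)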
320
0
"""simple docstring""" UpperCAmelCase : int = [ 999, 800, 799, 600, 599, 500, 400, 399, 377, 355, 333, 311, 288, 266, 244, 222, 200, 199, 177, 155, 133, 111, 88, 66, 44, 22, 0, ] UpperCAmelCase : Optional[int] = [ 999, 976, 952, 928, 905, 882, 858, 857, 810, 762, 715, 714, 572, 429, 428, 286, 285, 238, 190, 143, 142, 118, 95, 71, 47, 24, 0, ] UpperCAmelCase : List[Any] = [ 999, 988, 977, 966, 955, 944, 933, 922, 911, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 350, 300, 299, 266, 233, 200, 199, 179, 159, 140, 120, 100, 99, 88, 77, 66, 55, 44, 33, 22, 11, 0, ] UpperCAmelCase : Optional[int] = [ 999, 995, 992, 989, 985, 981, 978, 975, 971, 967, 964, 961, 957, 956, 951, 947, 942, 937, 933, 928, 923, 919, 914, 913, 908, 903, 897, 892, 887, 881, 876, 871, 870, 864, 858, 852, 846, 840, 834, 828, 827, 820, 813, 806, 799, 792, 785, 784, 777, 770, 763, 756, 749, 742, 741, 733, 724, 716, 707, 699, 698, 688, 677, 666, 656, 655, 645, 634, 623, 613, 612, 598, 584, 570, 569, 555, 541, 527, 526, 505, 484, 483, 462, 440, 439, 396, 395, 352, 351, 308, 307, 264, 263, 220, 219, 176, 132, 88, 44, 0, ] UpperCAmelCase : int = [ 999, 997, 995, 992, 990, 988, 986, 984, 981, 979, 977, 975, 972, 970, 968, 966, 964, 961, 959, 957, 956, 954, 951, 949, 946, 944, 941, 939, 936, 934, 931, 929, 926, 924, 921, 919, 916, 914, 913, 910, 907, 905, 902, 899, 896, 893, 891, 888, 885, 882, 879, 877, 874, 871, 870, 867, 864, 861, 858, 855, 852, 849, 846, 843, 840, 837, 834, 831, 828, 827, 824, 821, 817, 814, 811, 808, 804, 801, 798, 795, 791, 788, 785, 784, 780, 777, 774, 770, 766, 763, 760, 756, 752, 749, 746, 742, 741, 737, 733, 730, 726, 722, 718, 714, 710, 707, 703, 699, 698, 694, 690, 685, 681, 677, 673, 669, 664, 660, 656, 655, 650, 646, 641, 636, 632, 627, 622, 618, 613, 612, 607, 602, 596, 591, 586, 580, 575, 570, 569, 563, 557, 551, 545, 539, 533, 527, 526, 519, 512, 505, 498, 491, 484, 483, 474, 466, 457, 449, 440, 439, 428, 418, 407, 396, 395, 381, 366, 352, 351, 330, 308, 307, 286, 264, 263, 242, 220, 219, 176, 175, 132, 131, 88, 44, 0, ] UpperCAmelCase : Optional[Any] = [ 999, 991, 982, 974, 966, 958, 950, 941, 933, 925, 916, 908, 900, 899, 874, 850, 825, 800, 799, 700, 600, 500, 400, 300, 200, 100, 0, ] UpperCAmelCase : Optional[Any] = [ 999, 992, 985, 978, 971, 964, 957, 949, 942, 935, 928, 921, 914, 907, 900, 899, 879, 859, 840, 820, 800, 799, 766, 733, 700, 699, 650, 600, 599, 500, 499, 400, 399, 300, 299, 200, 199, 100, 99, 0, ] UpperCAmelCase : Optional[int] = [ 999, 996, 992, 989, 985, 982, 979, 975, 972, 968, 965, 961, 958, 955, 951, 948, 944, 941, 938, 934, 931, 927, 924, 920, 917, 914, 910, 907, 903, 900, 899, 891, 884, 876, 869, 861, 853, 846, 838, 830, 823, 815, 808, 800, 799, 788, 777, 766, 755, 744, 733, 722, 711, 700, 699, 688, 677, 666, 655, 644, 633, 622, 611, 600, 599, 585, 571, 557, 542, 528, 514, 500, 499, 485, 471, 457, 442, 428, 414, 400, 399, 379, 359, 340, 320, 300, 299, 279, 259, 240, 220, 200, 199, 166, 133, 100, 99, 66, 33, 0, ]
363
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
0
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[Any] ) -> int: '''simple docstring''' __UpperCAmelCase : int = args.log_outputs __UpperCAmelCase : Dict = """_""".join(args.dataset.split("""/""" ) + [args.config, args.split] ) # load metric __UpperCAmelCase : Tuple = load_metric("""wer""" ) __UpperCAmelCase : List[str] = load_metric("""cer""" ) # compute metrics __UpperCAmelCase : Any = wer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) __UpperCAmelCase : Optional[Any] = cer.compute(references=result["""target"""] , predictions=result["""prediction"""] ) # print & log results __UpperCAmelCase : Union[str, Any] = f'''WER: {wer_result}\nCER: {cer_result}''' print(a_ ) with open(f'''{dataset_id}_eval_results.txt''' , """w""" ) as f: f.write(a_ ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: __UpperCAmelCase : Optional[int] = f'''log_{dataset_id}_predictions.txt''' __UpperCAmelCase : List[str] = f'''log_{dataset_id}_targets.txt''' with open(a_ , """w""" ) as p, open(a_ , """w""" ) as t: # mapping function to write output def write_to_file(_UpperCamelCase : Any , _UpperCamelCase : List[Any] ): p.write(f'''{i}''' + """\n""" ) p.write(batch["""prediction"""] + """\n""" ) t.write(f'''{i}''' + """\n""" ) t.write(batch["""target"""] + """\n""" ) result.map(a_ , with_indices=a_ ) def lowerCamelCase ( _UpperCamelCase : Tuple ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = """[,?.!\-\;\:\"“%‘”�—’…–]""" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training __UpperCAmelCase : Any = re.sub(a_ , """""" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
__UpperCAmelCase : Optional[int] = ["""\n\n""", """\n""", """ """, """ """] for t in token_sequences_to_ignore: __UpperCAmelCase : Optional[Any] = """ """.join(text.split(a_ ) ) return text def lowerCamelCase ( _UpperCamelCase : int ) -> str: '''simple docstring''' __UpperCAmelCase : str = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=a_ ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor __UpperCAmelCase : Tuple = AutoFeatureExtractor.from_pretrained(args.model_id ) __UpperCAmelCase : int = feature_extractor.sampling_rate # resample audio __UpperCAmelCase : Optional[int] = dataset.cast_column("""audio""" , Audio(sampling_rate=a_ ) ) # load eval pipeline if args.device is None: __UpperCAmelCase : Dict = 0 if torch.cuda.is_available() else -1 __UpperCAmelCase : Optional[int] = pipeline("""automatic-speech-recognition""" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(_UpperCamelCase : List[str] ): __UpperCAmelCase : str = asr( batch["""audio"""]["""array"""] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) __UpperCAmelCase : List[Any] = prediction["""text"""] __UpperCAmelCase : List[str] = normalize_text(batch["""sentence"""] ) return batch # run inference on all examples __UpperCAmelCase : Optional[int] = dataset.map(a_ , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(a_ , a_ ) if __name__ == "__main__": UpperCAmelCase : List[str] = argparse.ArgumentParser() parser.add_argument( '--model_id', type=str, required=True, help='Model identifier. Should be loadable with 🤗 Transformers' ) parser.add_argument( '--dataset', type=str, required=True, help='Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets', ) parser.add_argument( '--config', type=str, required=True, help='Config of the dataset. *E.g.* `\'en\'` for Common Voice' ) parser.add_argument('--split', type=str, required=True, help='Split of the dataset. *E.g.* `\'test\'`') parser.add_argument( '--chunk_length_s', type=float, default=None, help='Chunk length in seconds. Defaults to 5 seconds.' ) parser.add_argument( '--stride_length_s', type=float, default=None, help='Stride of the audio chunks. Defaults to 1 second.' ) parser.add_argument( '--log_outputs', action='store_true', help='If defined, write outputs to log file for analysis.' ) parser.add_argument( '--device', type=int, default=None, help='The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.', ) UpperCAmelCase : List[str] = parser.parse_args() main(args)
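# Hedged usage sketch for the evaluation script above; "eval.py" and the
# checkpoint/dataset identifiers are placeholders, not values from the file:
#
#   python eval.py \
#       --model_id <your-wav2vec2-checkpoint> \
#       --dataset mozilla-foundation/common_voice_8_0 \
#       --config en \
#       --split test \
#       --log_outputs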
364
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
0
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int: '''simple docstring''' return 1 if input_a == input_a else 0 def lowerCamelCase ( ) -> None: '''simple docstring''' assert xnor_gate(0 , 0 ) == 1 assert xnor_gate(0 , 1 ) == 0 assert xnor_gate(1 , 0 ) == 0 assert xnor_gate(1 , 1 ) == 1 if __name__ == "__main__": print(xnor_gate(0, 0)) print(xnor_gate(0, 1)) print(xnor_gate(1, 0)) print(xnor_gate(1, 1))
365
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
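# Illustrative conversion command; the script filename is a placeholder, but the
# flags match the argparse definitions above.
#
#     python convert_efficientnet_to_pytorch.py --model_name b0 \
#         --pytorch_dump_folder_path hf_model --save_model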
320
0
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node UpperCAmelCase : Dict = 4 UpperCAmelCase : Optional[Any] = 3 class lowerCamelCase__ ( __SCREAMING_SNAKE_CASE ): """simple docstring""" pass def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' for shard in shards: for i in range(lowerCAmelCase__ ): yield {"i": i, "shard": shard} def lowerCamelCase ( ) -> Dict: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = int(os.environ["""RANK"""] ) __UpperCAmelCase : Optional[Any] = int(os.environ["""WORLD_SIZE"""] ) __UpperCAmelCase : Tuple = ArgumentParser() parser.add_argument("""--streaming""" , type=lowerCAmelCase__ ) parser.add_argument("""--local_rank""" , type=lowerCAmelCase__ ) parser.add_argument("""--num_workers""" , type=lowerCAmelCase__ , default=0 ) __UpperCAmelCase : Dict = parser.parse_args() __UpperCAmelCase : Dict = args.streaming __UpperCAmelCase : Optional[int] = args.num_workers __UpperCAmelCase : str = {"""shards""": [f'''shard_{shard_idx}''' for shard_idx in range(lowerCAmelCase__ )]} __UpperCAmelCase : Tuple = IterableDataset.from_generator(lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ ) if not streaming: __UpperCAmelCase : Tuple = Dataset.from_list(list(lowerCAmelCase__ ) ) __UpperCAmelCase : Any = split_dataset_by_node(lowerCAmelCase__ , rank=lowerCAmelCase__ , world_size=lowerCAmelCase__ ) __UpperCAmelCase : str = torch.utils.data.DataLoader(lowerCAmelCase__ , num_workers=lowerCAmelCase__ ) __UpperCAmelCase : List[str] = NUM_SHARDS * NUM_ITEMS_PER_SHARD __UpperCAmelCase : List[Any] = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) __UpperCAmelCase : List[str] = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(f'''local_size {local_size} != expected_local_size {expected_local_size}''' ) if __name__ == "__main__": main()
366
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
0
"""simple docstring""" from datetime import datetime import matplotlib.pyplot as plt import torch def lowerCamelCase ( _UpperCamelCase : str ) -> int: '''simple docstring''' for param in module.parameters(): __UpperCAmelCase : Any = False def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = "cuda" if torch.cuda.is_available() else "cpu" if torch.backends.mps.is_available() and torch.backends.mps.is_built(): __UpperCAmelCase : int = "mps" if device == "mps": print( """WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch""" """ errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues""" """ with generations.""" ) return device def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> int: '''simple docstring''' __UpperCAmelCase : int = plt.imshow(a_ ) fig.axes.get_xaxis().set_visible(a_ ) fig.axes.get_yaxis().set_visible(a_ ) plt.show() def lowerCamelCase ( ) -> Dict: '''simple docstring''' __UpperCAmelCase : Dict = datetime.now() __UpperCAmelCase : List[str] = current_time.strftime("""%H:%M:%S""" ) return timestamp
367
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
0
"""simple docstring""" import json import os import sys import tempfile import unittest from pathlib import Path from shutil import copyfile from huggingface_hub import HfFolder, Repository, create_repo, delete_repo from requests.exceptions import HTTPError import transformers from transformers import ( CONFIG_MAPPING, FEATURE_EXTRACTOR_MAPPING, PROCESSOR_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoFeatureExtractor, AutoProcessor, AutoTokenizer, BertTokenizer, ProcessorMixin, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaProcessor, ) from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils')) from test_module.custom_configuration import CustomConfig # noqa E402 from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 from test_module.custom_processing import CustomProcessor # noqa E402 from test_module.custom_tokenization import CustomTokenizer # noqa E402 UpperCAmelCase : Tuple = get_tests_dir('fixtures/dummy_feature_extractor_config.json') UpperCAmelCase : List[str] = get_tests_dir('fixtures/vocab.json') UpperCAmelCase : Any = get_tests_dir('fixtures') class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[int] = 0 def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[str] = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase : int = WavaVecaConfig() __UpperCAmelCase : str = AutoProcessor.from_pretrained("""facebook/wav2vec2-base-960h""" ) # save in new folder model_config.save_pretrained(_SCREAMING_SNAKE_CASE ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Any = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: # copy relevant files copyfile(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) copyfile(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) ) __UpperCAmelCase : Tuple = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor() __UpperCAmelCase : Optional[int] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) __UpperCAmelCase : Any = WavaVecaProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(_SCREAMING_SNAKE_CASE ) # drop `processor_class` in tokenizer with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """r""" ) as f: __UpperCAmelCase : List[Any] = json.load(_SCREAMING_SNAKE_CASE ) config_dict.pop("""processor_class""" ) with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as f: 
f.write(json.dumps(_SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase : Dict = WavaVecaFeatureExtractor() __UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained("""facebook/wav2vec2-base-960h""" ) __UpperCAmelCase : List[str] = WavaVecaProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # save in new folder processor.save_pretrained(_SCREAMING_SNAKE_CASE ) # drop `processor_class` in feature extractor with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """r""" ) as f: __UpperCAmelCase : int = json.load(_SCREAMING_SNAKE_CASE ) config_dict.pop("""processor_class""" ) with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as f: f.write(json.dumps(_SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmpdirname: __UpperCAmelCase : Optional[Any] = WavaVecaConfig(processor_class="""Wav2Vec2Processor""" ) model_config.save_pretrained(_SCREAMING_SNAKE_CASE ) # copy relevant files copyfile(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , """vocab.json""" ) ) # create emtpy sample processor with open(os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , """w""" ) as f: f.write("""{}""" ) __UpperCAmelCase : Tuple = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' with self.assertRaises(_SCREAMING_SNAKE_CASE ): __UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) # If remote code is disabled, we can't load this config. 
with self.assertRaises(_SCREAMING_SNAKE_CASE ): __UpperCAmelCase : Optional[int] = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertTrue(processor.special_attribute_present ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) __UpperCAmelCase : Any = processor.feature_extractor self.assertTrue(feature_extractor.special_attribute_present ) self.assertEqual(feature_extractor.__class__.__name__ , """NewFeatureExtractor""" ) __UpperCAmelCase : Tuple = processor.tokenizer self.assertTrue(tokenizer.special_attribute_present ) if is_tokenizers_available(): self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizerFast""" ) # Test we can also load the slow version __UpperCAmelCase : Optional[Any] = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE , use_fast=_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[str] = new_processor.tokenizer self.assertTrue(new_tokenizer.special_attribute_present ) self.assertEqual(new_tokenizer.__class__.__name__ , """NewTokenizer""" ) else: self.assertEqual(tokenizer.__class__.__name__ , """NewTokenizer""" ) def lowerCamelCase__ ( self : int ): '''simple docstring''' try: AutoConfig.register("""custom""" , _SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE , slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) AutoProcessor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Trying to register something existing in the Transformers library will raise an error with self.assertRaises(_SCREAMING_SNAKE_CASE ): AutoProcessor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Now that the config is registered, it can be used as any other config with the auto-API __UpperCAmelCase : Union[str, Any] = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.txt""" ) with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __UpperCAmelCase : Optional[Any] = CustomTokenizer(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[int] = CustomProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : int = AutoProcessor.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' class lowerCamelCase__ ( _UpperCamelCase ): """simple docstring""" __a = False class lowerCamelCase__ ( _UpperCamelCase ): """simple docstring""" __a = False class lowerCamelCase__ ( 
_UpperCamelCase ): """simple docstring""" __a = 'AutoFeatureExtractor' __a = 'AutoTokenizer' __a = False try: AutoConfig.register("""custom""" , _SCREAMING_SNAKE_CASE ) AutoFeatureExtractor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) AutoTokenizer.register(_SCREAMING_SNAKE_CASE , slow_tokenizer_class=_SCREAMING_SNAKE_CASE ) AutoProcessor.register(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # If remote code is not set, the default is to use local classes. __UpperCAmelCase : Tuple = AutoProcessor.from_pretrained("""hf-internal-testing/test_dynamic_processor""" ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote code is disabled, we load the local ones. __UpperCAmelCase : Dict = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertFalse(processor.special_attribute_present ) self.assertFalse(processor.feature_extractor.special_attribute_present ) self.assertFalse(processor.tokenizer.special_attribute_present ) # If remote is enabled, we load from the Hub. __UpperCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained( """hf-internal-testing/test_dynamic_processor""" , trust_remote_code=_SCREAMING_SNAKE_CASE ) self.assertEqual(processor.__class__.__name__ , """NewProcessor""" ) self.assertTrue(processor.special_attribute_present ) self.assertTrue(processor.feature_extractor.special_attribute_present ) self.assertTrue(processor.tokenizer.special_attribute_present ) finally: if "custom" in CONFIG_MAPPING._extra_content: del CONFIG_MAPPING._extra_content["custom"] if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content: del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig] if CustomConfig in TOKENIZER_MAPPING._extra_content: del TOKENIZER_MAPPING._extra_content[CustomConfig] if CustomConfig in PROCESSOR_MAPPING._extra_content: del PROCESSOR_MAPPING._extra_content[CustomConfig] def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-bert""" ) self.assertEqual(processor.__class__.__name__ , """BertTokenizerFast""" ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = AutoProcessor.from_pretrained("""hf-internal-testing/tiny-random-convnext""" ) self.assertEqual(processor.__class__.__name__ , """ConvNextImageProcessor""" ) @is_staging_test class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou'] @classmethod def lowerCamelCase__ ( cls : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = TOKEN HfFolder.save_token(_SCREAMING_SNAKE_CASE ) @classmethod def lowerCamelCase__ ( cls : int ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="""test-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""valid_org/test-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="""test-dynamic-processor""" ) except HTTPError: pass def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Tuple = WavaVecaProcessor.from_pretrained(_SCREAMING_SNAKE_CASE ) with 
tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(_SCREAMING_SNAKE_CASE , """test-processor""" ) , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token ) __UpperCAmelCase : List[str] = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , _SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = WavaVecaProcessor.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: processor.save_pretrained( os.path.join(_SCREAMING_SNAKE_CASE , """test-processor-org""" ) , push_to_hub=_SCREAMING_SNAKE_CASE , use_auth_token=self._token , organization="""valid_org""" , ) __UpperCAmelCase : Union[str, Any] = WavaVecaProcessor.from_pretrained("""valid_org/test-processor-org""" ) for k, v in processor.feature_extractor.__dict__.items(): self.assertEqual(_SCREAMING_SNAKE_CASE , getattr(new_processor.feature_extractor , _SCREAMING_SNAKE_CASE ) ) self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() CustomTokenizer.register_for_auto_class() CustomProcessor.register_for_auto_class() __UpperCAmelCase : str = CustomFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: __UpperCAmelCase : Optional[int] = os.path.join(_SCREAMING_SNAKE_CASE , """vocab.txt""" ) with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in self.vocab_tokens] ) ) __UpperCAmelCase : int = CustomTokenizer(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Dict = CustomProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) with tempfile.TemporaryDirectory() as tmp_dir: create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token ) __UpperCAmelCase : str = Repository(_SCREAMING_SNAKE_CASE , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token ) processor.save_pretrained(_SCREAMING_SNAKE_CASE ) # This has added the proper auto_map field to the feature extractor config self.assertDictEqual( processor.feature_extractor.auto_map , { """AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor""", """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # This has added the proper auto_map field to the tokenizer config with open(os.path.join(_SCREAMING_SNAKE_CASE , """tokenizer_config.json""" ) ) as f: __UpperCAmelCase : Optional[int] = json.load(_SCREAMING_SNAKE_CASE ) self.assertDictEqual( tokenizer_config["""auto_map"""] , { """AutoTokenizer""": ["""custom_tokenization.CustomTokenizer""", None], """AutoProcessor""": """custom_processing.CustomProcessor""", } , ) # The code has been copied from fixtures self.assertTrue(os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , """custom_feature_extraction.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , """custom_tokenization.py""" ) ) ) self.assertTrue(os.path.isfile(os.path.join(_SCREAMING_SNAKE_CASE , """custom_processing.py""" ) ) ) repo.push_to_hub() __UpperCAmelCase : Dict = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , 
trust_remote_code=_SCREAMING_SNAKE_CASE ) # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module self.assertEqual(new_processor.__class__.__name__ , """CustomProcessor""" )
368
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
0
"""simple docstring""" import math def lowerCamelCase ( _UpperCamelCase : int ) -> Dict: '''simple docstring''' __UpperCAmelCase : Any = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 ) return exponent == int(a__ ) def lowerCamelCase ( _UpperCamelCase : str = 1 / 1_2_3_4_5 ) -> Dict: '''simple docstring''' __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : int = 3 while True: __UpperCAmelCase : Union[str, Any] = (integer**2 - 1) / 4 # if candidate is an integer, then there is a partition for k if partition_candidate == int(a__ ): __UpperCAmelCase : List[str] = int(a__ ) total_partitions += 1 if check_partition_perfect(a__ ): perfect_partitions += 1 if perfect_partitions > 0: if perfect_partitions / total_partitions < max_proportion: return int(a__ ) integer += 1 if __name__ == "__main__": print(F"{solution() = }")
369
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
0
"""simple docstring""" import numpy as np from sklearn.datasets import fetch_california_housing from sklearn.metrics import mean_absolute_error, mean_squared_error from sklearn.model_selection import train_test_split from xgboost import XGBRegressor def lowerCamelCase ( _UpperCamelCase : dict ) -> Optional[int]: '''simple docstring''' return (data["data"], data["target"]) def lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray ) -> Any: '''simple docstring''' __UpperCAmelCase : Tuple = XGBRegressor(verbosity=0 , random_state=4_2 ) xgb.fit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Predict target for test data __UpperCAmelCase : Union[str, Any] = xgb.predict(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Tuple = predictions.reshape(len(__SCREAMING_SNAKE_CASE ) , 1 ) return predictions def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = fetch_california_housing() __UpperCAmelCase : Any = data_handling(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : int = train_test_split( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , test_size=0.25 , random_state=1 ) __UpperCAmelCase : Union[str, Any] = xgboost(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Error printing print(f'''Mean Absolute Error : {mean_absolute_error(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}''' ) print(f'''Mean Square Error : {mean_squared_error(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )}''' ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) main()
370
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
0
"""simple docstring""" import os import numpy import onnx def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = a.name __UpperCAmelCase : Tuple = b.name __UpperCAmelCase : Any = """""" __UpperCAmelCase : List[Any] = """""" __UpperCAmelCase : List[Any] = a == b __UpperCAmelCase : Union[str, Any] = name_a __UpperCAmelCase : Optional[Any] = name_b return res def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' for i, input_name in enumerate(node_proto.input ): if input_name == name: node_proto.input.insert(UpperCAmelCase__ , UpperCAmelCase__ ) node_proto.input.pop(i + 1 ) if node_proto.op_type == "If": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) _graph_replace_input_with(node_proto.attribute[1].g , UpperCAmelCase__ , UpperCAmelCase__ ) if node_proto.op_type == "Loop": _graph_replace_input_with(node_proto.attribute[0].g , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : List[Any] , _UpperCamelCase : str ) -> int: '''simple docstring''' for n in graph_proto.node: _node_replace_input_with(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) -> List[str]: '''simple docstring''' __UpperCAmelCase : int = list(model.graph.initializer ) __UpperCAmelCase : List[str] = list(model_without_ext.graph.initializer ) for i, ref_i in ind_to_replace: assert inits_with_data[i].name == inits[i].name assert inits_with_data[ref_i].name == inits[ref_i].name assert i > ref_i __UpperCAmelCase : Optional[Any] = inits[i].name __UpperCAmelCase : List[str] = inits[ref_i].name model_without_ext.graph.initializer.remove(inits[i] ) # for n in model.graph.node: _graph_replace_input_with(model_without_ext.graph , UpperCAmelCase__ , UpperCAmelCase__ ) def lowerCamelCase ( _UpperCamelCase : int ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = os.path.dirname(UpperCAmelCase__ ) __UpperCAmelCase : Optional[Any] = os.path.basename(UpperCAmelCase__ ) __UpperCAmelCase : str = onnx.load(os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) ) __UpperCAmelCase : List[Any] = list(model.graph.initializer ) __UpperCAmelCase : int = set() __UpperCAmelCase : int = {} __UpperCAmelCase : str = [] __UpperCAmelCase : int = 0 for i in range(len(UpperCAmelCase__ ) ): if i in dup_set: continue for j in range(i + 1 , len(UpperCAmelCase__ ) ): if j in dup_set: continue if _is_equal_tensor_proto(inits[i] , inits[j] ): dup_set.add(UpperCAmelCase__ ) dup_set.add(UpperCAmelCase__ ) __UpperCAmelCase : Dict = inits[j].data_type __UpperCAmelCase : List[str] = numpy.prod(inits[j].dims ) if dtype == 1: mem_size *= 4 elif dtype == 6: mem_size *= 4 elif dtype == 7 or dtype == 1_1: mem_size *= 8 else: print("""unexpected data type: """ , UpperCAmelCase__ ) total_reduced_size += mem_size __UpperCAmelCase : int = inits[i].name __UpperCAmelCase : List[str] = inits[j].name if name_i in dup_map: dup_map[name_i].append(UpperCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = [name_j] ind_to_replace.append((j, i) ) print("""total reduced size: """ , total_reduced_size / 1_0_2_4 / 1_0_2_4 / 1_0_2_4 , """GB""" ) __UpperCAmelCase : Tuple = sorted(UpperCAmelCase__ ) _remove_dup_initializers_from_model(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) 
__UpperCAmelCase : Union[str, Any] = """optimized_""" + model_file_name __UpperCAmelCase : Optional[int] = os.path.join(UpperCAmelCase__ , UpperCAmelCase__ ) onnx.save(UpperCAmelCase__ , UpperCAmelCase__ ) return new_model
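# Usage sketch (added; the entry-point name follows the reconstruction above and the
# model path is hypothetical): deduplicate shared initializers in a saved ONNX model.
# Note that onnx.load keeps the whole model in memory, so this only suits models
# whose weights fit in RAM.
if __name__ == "__main__":
    optimized_path = remove_dup_initializers("exported/model.onnx")
    print("deduplicated model written to", optimized_path)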
371
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
0
"""simple docstring""" import colorsys from PIL import Image # type: ignore def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : int ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = x __UpperCAmelCase : Optional[Any] = y for step in range(__a ): # noqa: B007 __UpperCAmelCase : Any = a * a - b * b + x __UpperCAmelCase : Union[str, Any] = 2 * a * b + y __UpperCAmelCase : Tuple = a_new # divergence happens for all complex number with an absolute value # greater than 4 if a * a + b * b > 4: break return step / (max_step - 1) def lowerCamelCase ( _UpperCamelCase : float ) -> str: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return (2_5_5, 2_5_5, 2_5_5) def lowerCamelCase ( _UpperCamelCase : float ) -> str: '''simple docstring''' if distance == 1: return (0, 0, 0) else: return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(__a , 1 , 1 ) ) def lowerCamelCase ( _UpperCamelCase : int = 8_0_0 , _UpperCamelCase : int = 6_0_0 , _UpperCamelCase : float = -0.6 , _UpperCamelCase : float = 0 , _UpperCamelCase : float = 3.2 , _UpperCamelCase : int = 5_0 , _UpperCamelCase : bool = True , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Any = Image.new("""RGB""" , (image_width, image_height) ) __UpperCAmelCase : Dict = img.load() # loop through the image-coordinates for image_x in range(__a ): for image_y in range(__a ): # determine the figure-coordinates based on the image-coordinates __UpperCAmelCase : List[Any] = figure_width / image_width * image_height __UpperCAmelCase : Optional[Any] = figure_center_x + (image_x / image_width - 0.5) * figure_width __UpperCAmelCase : List[str] = figure_center_y + (image_y / image_height - 0.5) * figure_height __UpperCAmelCase : Any = get_distance(__a , __a , __a ) # color the corresponding pixel based on the selected coloring-function if use_distance_color_coding: __UpperCAmelCase : Any = get_color_coded_rgb(__a ) else: __UpperCAmelCase : Tuple = get_black_and_white_rgb(__a ) return img if __name__ == "__main__": import doctest doctest.testmod() # colored version, full figure UpperCAmelCase : Tuple = get_image() # uncomment for colored version, different section, zoomed in # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4, # figure_width = 0.8) # uncomment for black and white version, full figure # img = get_image(use_distance_color_coding = False) # uncomment to save the image # img.save("mandelbrot.png") img.show()
350
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
0
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = HfArgumentParser(__lowerCamelCase ) __UpperCAmelCase : Any = parser.parse_args_into_dataclasses()[0] __UpperCAmelCase : Any = TensorFlowBenchmark(args=__lowerCamelCase ) try: __UpperCAmelCase : List[Any] = parser.parse_args_into_dataclasses()[0] except ValueError as e: __UpperCAmelCase : Union[str, Any] = "Arg --no_{0} is no longer used, please use --no-{0} instead." __UpperCAmelCase : List[str] = " ".join(str(__lowerCamelCase ).split(""" """ )[:-1] ) __UpperCAmelCase : Optional[Any] = "" __UpperCAmelCase : str = eval(str(__lowerCamelCase ).split(""" """ )[-1] ) __UpperCAmelCase : Union[str, Any] = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__lowerCamelCase ) if len(__lowerCamelCase ) > 0: __UpperCAmelCase : str = full_error_msg + begin_error_msg + str(__lowerCamelCase ) raise ValueError(__lowerCamelCase ) benchmark.run() if __name__ == "__main__": main()
351
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
0
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 2_0_0 ) -> int: '''simple docstring''' __UpperCAmelCase : List[str] = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 2_0_0] __UpperCAmelCase : Tuple = [0] * (pence + 1) __UpperCAmelCase : List[Any] = 1 # base case: 1 way to make 0 pence for coin in coins: for i in range(_snake_case , pence + 1 , 1 ): number_of_ways[i] += number_of_ways[i - coin] return number_of_ways[pence] if __name__ == "__main__": assert solution(200) == 7_3682
352
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
0
"""simple docstring""" import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import is_pyanvml_available, is_tf_available, logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_pyanvml_available(): import pyanvml.pyanvml as nvml UpperCAmelCase : str = logging.get_logger(__name__) def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Dict ) -> Tuple: '''simple docstring''' def run_func(_UpperCamelCase : Optional[Any] ): @wraps(_A ) def run_in_eager_mode(*_UpperCamelCase : Optional[Any] , **_UpperCamelCase : List[Any] ): return func(*_A , **_A ) @wraps(_A ) @tf.function(experimental_compile=_A ) def run_in_graph_mode(*_UpperCamelCase : Optional[Any] , **_UpperCamelCase : Any ): return func(*_A , **_A ) if do_eager_mode is True: if use_xla is not False: raise ValueError( """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" ) return run_in_eager_mode else: return run_in_graph_mode return run_func def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] , _UpperCamelCase : Tuple ) -> Dict: '''simple docstring''' __UpperCAmelCase : Optional[int] = random.Random() __UpperCAmelCase : List[Any] = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )] return tf.constant(_A , shape=(batch_size, sequence_length) , dtype=tf.intaa ) class lowerCamelCase__ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" __a = 42 __a = 42 __a = """TensorFlow""" @property def lowerCamelCase__ ( self : Any ): '''simple docstring''' return tf.__version__ def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : int = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) __UpperCAmelCase : Tuple = self._prepare_inference_func(snake_case__ , snake_case__ , snake_case__ ) return self._measure_speed(_inference ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) __UpperCAmelCase : Union[str, Any] = self._prepare_train_func(snake_case__ , snake_case__ , snake_case__ ) return self._measure_speed(_train ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : List[Any] ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , snake_case__ ) __UpperCAmelCase : List[Any] = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) __UpperCAmelCase : Union[str, Any] = self._prepare_inference_func(snake_case__ , snake_case__ , snake_case__ ) return self._measure_memory(_inference ) def lowerCamelCase__ ( self : Dict , 
UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , snake_case__ ) __UpperCAmelCase : str = self.args.strategy if strategy is None: raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" ) __UpperCAmelCase : str = self._prepare_train_func(snake_case__ , snake_case__ , snake_case__ ) return self._measure_memory(_train ) def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Any = self.config_dict[model_name] if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) __UpperCAmelCase : int = ( hasattr(snake_case__ , """architectures""" ) and isinstance(config.architectures , snake_case__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __UpperCAmelCase : str = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model __UpperCAmelCase : Optional[int] = __import__("""transformers""" , fromlist=[model_class] ) __UpperCAmelCase : List[str] = getattr(snake_case__ , snake_case__ ) __UpperCAmelCase : Dict = model_cls(snake_case__ ) except ImportError: raise ImportError( f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: __UpperCAmelCase : Dict = TF_MODEL_MAPPING[config.__class__](snake_case__ ) # encoder-decoder has vocab size saved differently __UpperCAmelCase : str = config.vocab_size if hasattr(snake_case__ , """vocab_size""" ) else config.encoder.vocab_size __UpperCAmelCase : List[str] = random_input_ids(snake_case__ , snake_case__ , snake_case__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_forward(): return model(snake_case__ , decoder_input_ids=snake_case__ , training=snake_case__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_forward(): return model(snake_case__ , training=snake_case__ ) __UpperCAmelCase : Dict = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def lowerCamelCase__ ( self : int , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : List[str] = self.config_dict[model_name] if self.args.eager_mode is not False: raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" ) if self.args.fpaa: raise NotImplementedError("""Mixed precision is currently not supported.""" ) __UpperCAmelCase : Dict = ( hasattr(snake_case__ , """architectures""" ) and isinstance(config.architectures , snake_case__ ) and len(config.architectures ) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: __UpperCAmelCase : Any = 'TF' + config.architectures[0] # prepend 'TF' for tensorflow model __UpperCAmelCase : Optional[Any] = __import__("""transformers""" , fromlist=[model_class] ) __UpperCAmelCase : Tuple = getattr(snake_case__ , snake_case__ ) __UpperCAmelCase : str = model_cls(snake_case__ ) except ImportError: raise ImportError( f'''{model_class} does not exist. 
If you just want to test the pretrained model, you might want to''' """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" ) else: __UpperCAmelCase : Optional[Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](snake_case__ ) # encoder-decoder has vocab size saved differently __UpperCAmelCase : Union[str, Any] = config.vocab_size if hasattr(snake_case__ , """vocab_size""" ) else config.encoder.vocab_size __UpperCAmelCase : Optional[int] = random_input_ids(snake_case__ , snake_case__ , snake_case__ ) @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_decoder_train(): __UpperCAmelCase : Optional[int] = model(snake_case__ , decoder_input_ids=snake_case__ , labels=snake_case__ , training=snake_case__ )[0] __UpperCAmelCase : List[Any] = tf.gradients(snake_case__ , model.trainable_variables ) return gradients @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla ) def encoder_train(): __UpperCAmelCase : Union[str, Any] = model(snake_case__ , labels=snake_case__ , training=snake_case__ )[0] __UpperCAmelCase : Union[str, Any] = tf.gradients(snake_case__ , model.trainable_variables ) return gradients __UpperCAmelCase : Dict = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" ) timeit.repeat(snake_case__ , repeat=1 , number=5 ) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average __UpperCAmelCase : Dict = timeit.repeat( snake_case__ , repeat=self.args.repeat , number=10 , ) return min(snake_case__ ) / 10.0 except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int ): '''simple docstring''' logger.info( """Note that TensorFlow allocates more memory than """ """it might need to speed up computation. """ """The memory reported here corresponds to the memory """ """reported by `nvidia-smi`, which can vary depending """ """on total available memory on the GPU that is used.""" ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: if not self.args.eager_mode: raise ValueError( """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory""" """ consumption line by line.""" ) __UpperCAmelCase : Tuple = start_memory_tracing("""transformers""" ) if self.args.is_tpu: # tpu raise NotImplementedError( """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking""" """ with `args.memory=False`""" ) elif self.args.is_gpu: # gpu if not is_pyanvml_available(): logger.warning( """py3nvml not installed, we won\'t log GPU memory usage. """ """Install py3nvml (pip install py3nvml) to log information about GPU.""" ) __UpperCAmelCase : int = 'N/A' else: logger.info( """Measuring total GPU usage on GPU device. 
Make sure to not have additional processes""" """ running on the same GPU.""" ) # init nvml nvml.nvmlInit() func() __UpperCAmelCase : int = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx ) __UpperCAmelCase : Optional[Any] = nvml.nvmlDeviceGetMemoryInfo(snake_case__ ) __UpperCAmelCase : Optional[Any] = meminfo.used __UpperCAmelCase : Optional[int] = Memory(snake_case__ ) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( """When enabling line by line tracing, the max peak memory for CPU is inaccurate in""" """ TensorFlow.""" ) __UpperCAmelCase : str = None else: __UpperCAmelCase : str = measure_peak_memory_cpu(snake_case__ ) __UpperCAmelCase : Optional[int] = Memory(snake_case__ ) if isinstance(snake_case__ , snake_case__ ) else memory_bytes if self.args.trace_memory_line_by_line: __UpperCAmelCase : List[Any] = stop_memory_tracing(snake_case__ ) if memory is None: __UpperCAmelCase : int = summary.total else: __UpperCAmelCase : int = None return memory, summary except ResourceExhaustedError as e: self.print_fn(f'''Doesn\'t fit on GPU. {e}''' ) return "N/A", None
353
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : float , _UpperCamelCase : int ) -> float: '''simple docstring''' if digit_amount > 0: return round(number - int(_UpperCamelCase ) , _UpperCamelCase ) return number - int(_UpperCamelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
354
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" import json import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from transformers import OneFormerImageProcessor from transformers.models.oneformer.image_processing_oneformer import binary_mask_to_rle from transformers.models.oneformer.modeling_oneformer import OneFormerForUniversalSegmentationOutput if is_vision_available(): from PIL import Image def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any]="shi-labs/oneformer_demo" ) -> Union[str, Any]: '''simple docstring''' with open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""" ) , """r""" ) as f: __UpperCAmelCase : Optional[int] = json.load(snake_case_ ) __UpperCAmelCase : Any = {} __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : int = [] for key, info in class_info.items(): __UpperCAmelCase : Optional[int] = info["""name"""] class_names.append(info["""name"""] ) if info["isthing"]: thing_ids.append(int(snake_case_ ) ) __UpperCAmelCase : Any = thing_ids __UpperCAmelCase : Any = class_names return metadata class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Dict , UpperCamelCase : List[str] , UpperCamelCase : List[Any]=7 , UpperCamelCase : str=3 , UpperCamelCase : int=30 , UpperCamelCase : int=400 , UpperCamelCase : Any=None , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[str]=True , UpperCamelCase : Union[str, Any]=[0.5, 0.5, 0.5] , UpperCamelCase : int=[0.5, 0.5, 0.5] , UpperCamelCase : Optional[int]=10 , UpperCamelCase : Any=False , UpperCamelCase : Dict=255 , UpperCamelCase : int="shi-labs/oneformer_demo" , UpperCamelCase : Union[str, Any]="ade20k_panoptic.json" , UpperCamelCase : List[Any]=10 , ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : Any = min_resolution __UpperCAmelCase : List[str] = max_resolution __UpperCAmelCase : Tuple = do_resize __UpperCAmelCase : Optional[int] = {"""shortest_edge""": 32, """longest_edge""": 1_333} if size is None else size __UpperCAmelCase : str = do_normalize __UpperCAmelCase : Tuple = image_mean __UpperCAmelCase : str = image_std __UpperCAmelCase : Optional[int] = class_info_file __UpperCAmelCase : Dict = prepare_metadata(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Dict = num_text __UpperCAmelCase : Optional[int] = repo_path # for the post_process_functions __UpperCAmelCase : str = 2 __UpperCAmelCase : Union[str, Any] = 10 __UpperCAmelCase : List[Any] = 10 __UpperCAmelCase : Dict = 3 __UpperCAmelCase : str = 4 __UpperCAmelCase : Any = num_labels __UpperCAmelCase : Dict = do_reduce_labels __UpperCAmelCase : Union[str, Any] = ignore_index def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "num_labels": self.num_labels, "do_reduce_labels": self.do_reduce_labels, "ignore_index": self.ignore_index, "class_info_file": self.class_info_file, "metadata": self.metadata, "num_text": self.num_text, } def lowerCamelCase__ ( self : 
Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Tuple=False ): '''simple docstring''' if not batched: __UpperCAmelCase : int = image_inputs[0] if isinstance(SCREAMING_SNAKE_CASE_ , Image.Image ): __UpperCAmelCase : Union[str, Any] = image.size else: __UpperCAmelCase : List[str] = image.shape[1], image.shape[2] if w < h: __UpperCAmelCase : Tuple = int(self.size["""shortest_edge"""] * h / w ) __UpperCAmelCase : str = self.size["""shortest_edge"""] elif w > h: __UpperCAmelCase : Tuple = self.size["""shortest_edge"""] __UpperCAmelCase : Union[str, Any] = int(self.size["""shortest_edge"""] * w / h ) else: __UpperCAmelCase : List[Any] = self.size["""shortest_edge"""] __UpperCAmelCase : str = self.size["""shortest_edge"""] else: __UpperCAmelCase : int = [] for image in image_inputs: __UpperCAmelCase : Dict = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __UpperCAmelCase : List[Any] = max(SCREAMING_SNAKE_CASE_ , key=lambda UpperCamelCase : item[0] )[0] __UpperCAmelCase : int = max(SCREAMING_SNAKE_CASE_ , key=lambda UpperCamelCase : item[1] )[1] return expected_height, expected_width def lowerCamelCase__ ( self : Any ): '''simple docstring''' return OneFormerForUniversalSegmentationOutput( # +1 for null class class_queries_logits=torch.randn((self.batch_size, self.num_queries, self.num_classes + 1) ) , masks_queries_logits=torch.randn((self.batch_size, self.num_queries, self.height, self.width) ) , ) @require_torch @require_vision class lowerCamelCase__ ( _UpperCAmelCase , unittest.TestCase ): """simple docstring""" __a = OneFormerImageProcessor if (is_vision_available() and is_torch_available()) else None # only for test_image_processing_common.test_image_proc_to_json_string __a = image_processing_class def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = OneFormerImageProcessorTester(self ) @property def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return self.image_processing_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_mean""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """image_std""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_normalize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_resize""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """size""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """ignore_index""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """class_info_file""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """num_text""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """repo_path""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """metadata""" ) ) self.assertTrue(hasattr(SCREAMING_SNAKE_CASE_ , """do_reduce_labels""" ) ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase : int = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , Image.Image ) # Test not batched input __UpperCAmelCase : List[str] = image_processor(image_inputs[0] , ["""semantic"""] , 
return_tensors="""pt""" ).pixel_values __UpperCAmelCase : str = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Tuple = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Optional[int] = image_processor( SCREAMING_SNAKE_CASE_ , ["""semantic"""] * len(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase : List[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , numpify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , np.ndarray ) # Test not batched input __UpperCAmelCase : Optional[Any] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values __UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Any = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : int = image_processor( SCREAMING_SNAKE_CASE_ , ["""semantic"""] * len(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase : Dict = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE_ , torchify=SCREAMING_SNAKE_CASE_ ) for image in image_inputs: self.assertIsInstance(SCREAMING_SNAKE_CASE_ , torch.Tensor ) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , ["""semantic"""] , return_tensors="""pt""" ).pixel_values __UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE_ ) self.assertEqual( encoded_images.shape , (1, self.image_processing_tester.num_channels, expected_height, expected_width) , ) # Test batched __UpperCAmelCase : Optional[Any] = self.image_processing_tester.get_expected_values(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Tuple = image_processor( SCREAMING_SNAKE_CASE_ , ["""semantic"""] * len(SCREAMING_SNAKE_CASE_ ) , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processing_tester.batch_size, self.image_processing_tester.num_channels, expected_height, expected_width, ) , ) def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[Any]=False , UpperCamelCase : int=False , UpperCamelCase : List[Any]="np" ): '''simple docstring''' __UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) # 
prepare image and target __UpperCAmelCase : Union[str, Any] = self.image_processing_tester.num_labels __UpperCAmelCase : str = None __UpperCAmelCase : int = None __UpperCAmelCase : Optional[Any] = prepare_image_inputs(self.image_processing_tester , equal_resolution=SCREAMING_SNAKE_CASE_ ) if with_segmentation_maps: __UpperCAmelCase : Any = num_labels if is_instance_map: __UpperCAmelCase : Tuple = list(range(SCREAMING_SNAKE_CASE_ ) ) * 2 __UpperCAmelCase : List[str] = dict(enumerate(SCREAMING_SNAKE_CASE_ ) ) __UpperCAmelCase : Optional[Any] = [ np.random.randint(0 , high * 2 , (img.size[1], img.size[0]) ).astype(np.uinta ) for img in image_inputs ] if segmentation_type == "pil": __UpperCAmelCase : List[Any] = [Image.fromarray(SCREAMING_SNAKE_CASE_ ) for annotation in annotations] __UpperCAmelCase : List[Any] = image_processor( SCREAMING_SNAKE_CASE_ , ["""semantic"""] * len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" , instance_id_to_semantic_id=SCREAMING_SNAKE_CASE_ , pad_and_return_pixel_mask=SCREAMING_SNAKE_CASE_ , ) return inputs def lowerCamelCase__ ( self : str ): '''simple docstring''' pass def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' def common(UpperCamelCase : List[Any]=False , UpperCamelCase : Tuple=None ): __UpperCAmelCase : Dict = self.comm_get_image_processor_inputs( with_segmentation_maps=SCREAMING_SNAKE_CASE_ , is_instance_map=SCREAMING_SNAKE_CASE_ , segmentation_type=SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Optional[int] = inputs["""mask_labels"""] __UpperCAmelCase : Any = inputs["""class_labels"""] __UpperCAmelCase : Any = inputs["""pixel_values"""] __UpperCAmelCase : Union[str, Any] = inputs["""text_inputs"""] # check the batch_size for mask_label, class_label, text_input in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): self.assertEqual(mask_label.shape[0] , class_label.shape[0] ) # this ensure padding has happened self.assertEqual(mask_label.shape[1:] , pixel_values.shape[2:] ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.image_processing_tester.num_text ) common() common(is_instance_map=SCREAMING_SNAKE_CASE_ ) common(is_instance_map=SCREAMING_SNAKE_CASE_ , segmentation_type="""pil""" ) common(is_instance_map=SCREAMING_SNAKE_CASE_ , segmentation_type="""pil""" ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : int = np.zeros((20, 50) ) __UpperCAmelCase : Dict = 1 __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : Optional[int] = 1 __UpperCAmelCase : Optional[int] = binary_mask_to_rle(SCREAMING_SNAKE_CASE_ ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , 4 ) self.assertEqual(rle[0] , 21 ) self.assertEqual(rle[1] , 45 ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Dict = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) __UpperCAmelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs() __UpperCAmelCase : int = fature_extractor.post_process_semantic_segmentation(SCREAMING_SNAKE_CASE_ ) self.assertEqual(len(SCREAMING_SNAKE_CASE_ ) , self.image_processing_tester.batch_size ) self.assertEqual( segmentation[0].shape , ( self.image_processing_tester.height, self.image_processing_tester.width, ) , ) __UpperCAmelCase : Optional[Any] = [(1, 4) for i in 
range(self.image_processing_tester.batch_size )] __UpperCAmelCase : Union[str, Any] = fature_extractor.post_process_semantic_segmentation(SCREAMING_SNAKE_CASE_ , target_sizes=SCREAMING_SNAKE_CASE_ ) self.assertEqual(segmentation[0].shape , target_sizes[0] ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) __UpperCAmelCase : List[str] = self.image_processing_tester.get_fake_oneformer_outputs() __UpperCAmelCase : Any = image_processor.post_process_instance_segmentation(SCREAMING_SNAKE_CASE_ , threshold=0 ) self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual( el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.image_processing_class( num_labels=self.image_processing_tester.num_classes , max_seq_length=77 , task_seq_length=77 , class_info_file="""ade20k_panoptic.json""" , num_text=self.image_processing_tester.num_text , repo_path="""shi-labs/oneformer_demo""" , ) __UpperCAmelCase : Union[str, Any] = self.image_processing_tester.get_fake_oneformer_outputs() __UpperCAmelCase : Dict = image_processor.post_process_panoptic_segmentation(SCREAMING_SNAKE_CASE_ , threshold=0 ) self.assertTrue(len(SCREAMING_SNAKE_CASE_ ) == self.image_processing_tester.batch_size ) for el in segmentation: self.assertTrue("""segmentation""" in el ) self.assertTrue("""segments_info""" in el ) self.assertEqual(type(el["""segments_info"""] ) , SCREAMING_SNAKE_CASE_ ) self.assertEqual( el["""segmentation"""].shape , (self.image_processing_tester.height, self.image_processing_tester.width) )
355
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
0
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCamelCase__ ( __UpperCamelCase ): """simple docstring""" def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str ): '''simple docstring''' with open(UpperCamelCase , encoding="""utf-8""" ) as input_file: __UpperCAmelCase : int = re.compile(R"""(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)""" ) __UpperCAmelCase : List[Any] = input_file.read() __UpperCAmelCase : Optional[Any] = regexp.search(UpperCamelCase ) return match def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str ): '''simple docstring''' with open(UpperCamelCase , encoding="""utf-8""" ) as input_file: __UpperCAmelCase : List[str] = re.compile(R"""#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()""" , re.DOTALL ) __UpperCAmelCase : Optional[int] = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` __UpperCAmelCase : Tuple = regexp.finditer(UpperCamelCase ) __UpperCAmelCase : str = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : int = Path("""./datasets""" ) __UpperCAmelCase : Optional[int] = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(UpperCamelCase ) ): raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = Path("""./datasets""" ) __UpperCAmelCase : Dict = list(dataset_paths.absolute().glob("""**/*.py""" ) ) for dataset in dataset_files: if self._no_print_statements(str(UpperCamelCase ) ): raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
356
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" import functools def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = len(A__ ) __UpperCAmelCase : Tuple = len(A__ ) @functools.cache def min_distance(_UpperCamelCase : int , _UpperCamelCase : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa __UpperCAmelCase : int = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , A__ ) , 1 + min_distance(A__ , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
357
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
0
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging UpperCAmelCase : int = logging.get_logger(__name__) logging.set_verbosity_info() def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> int: '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: __UpperCAmelCase : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(_UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : Any = XLMProphetNetForConditionalGeneration.from_pretrained( _UpperCamelCase , output_loading_info=_UpperCamelCase ) else: __UpperCAmelCase : int = ProphetNetForConditionalGenerationOld.from_pretrained(_UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = ProphetNetForConditionalGeneration.from_pretrained( _UpperCamelCase , output_loading_info=_UpperCamelCase ) __UpperCAmelCase : Dict = ["""key_proj""", """value_proj""", """query_proj"""] __UpperCAmelCase : Union[str, Any] = { """self_attn""": """ngram_self_attn""", """cross_attn""": """encoder_attn""", """cross_attn_layer_norm""": """encoder_attn_layer_norm""", """feed_forward_layer_norm""": """final_layer_norm""", """feed_forward""": """""", """intermediate""": """fc1""", """output""": """fc2""", """key_proj""": """k_proj""", """query_proj""": """q_proj""", """value_proj""": """v_proj""", """word_embeddings""": """embed_tokens""", """embeddings_layer_norm""": """emb_layer_norm""", """relative_pos_embeddings""": """relative_linear""", """ngram_embeddings""": """ngram_input_embed""", """position_embeddings""": """embed_positions""", } for key in loading_info["missing_keys"]: __UpperCAmelCase : Dict = key.split(""".""" ) if attributes[0] == "lm_head": __UpperCAmelCase : Dict = prophet __UpperCAmelCase : Dict = prophet_old else: __UpperCAmelCase : str = prophet.prophetnet __UpperCAmelCase : Union[str, Any] = prophet_old.model __UpperCAmelCase : Any = False for attribute in attributes: if attribute in mapping: __UpperCAmelCase : str = mapping[attribute] if not hasattr(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) > 0: __UpperCAmelCase : Tuple = attribute elif hasattr(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" __UpperCAmelCase : Any = old_model.weight logger.info(f'''{attribute} is initialized.''' ) __UpperCAmelCase : List[str] = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
__UpperCAmelCase : int = old_model.bias logger.info(f'''{attribute} is initialized''' ) __UpperCAmelCase : Any = True break elif attribute in special_keys and hasattr(_UpperCamelCase , """in_proj_weight""" ): __UpperCAmelCase : List[str] = old_model.in_proj_weight.shape[0] // 3 __UpperCAmelCase : Any = getattr(_UpperCamelCase , _UpperCamelCase ) param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": __UpperCAmelCase : Dict = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) __UpperCAmelCase : str = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": __UpperCAmelCase : Optional[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) __UpperCAmelCase : Optional[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": __UpperCAmelCase : Tuple = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) __UpperCAmelCase : str = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) __UpperCAmelCase : List[Any] = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings." __UpperCAmelCase : Tuple = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] ) __UpperCAmelCase : int = True break if attribute.isdigit(): __UpperCAmelCase : int = model[int(_UpperCamelCase )] __UpperCAmelCase : Any = old_model[int(_UpperCamelCase )] else: __UpperCAmelCase : List[Any] = getattr(_UpperCamelCase , _UpperCamelCase ) if old_attribute == "": __UpperCAmelCase : List[Any] = old_model else: if not hasattr(_UpperCamelCase , _UpperCamelCase ): raise ValueError(f'''{old_model} does not have {old_attribute}''' ) __UpperCAmelCase : Optional[Any] = getattr(_UpperCamelCase , _UpperCamelCase ) if not is_key_init: raise ValueError(f'''{key} was not correctly initialized!''' ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) prophet.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase : Tuple = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
358
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
0
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_pegasus import PegasusTokenizer else: UpperCAmelCase : Dict = None UpperCAmelCase : Dict = logging.get_logger(__name__) UpperCAmelCase : Optional[Any] = "▁" UpperCAmelCase : List[Any] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"} UpperCAmelCase : Optional[Any] = { "vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"}, "tokenizer_file": { "google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json" }, } UpperCAmelCase : Any = { "google/pegasus-xsum": 512, } class lowerCamelCase__ ( SCREAMING_SNAKE_CASE__ ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = PegasusTokenizer __a = ['input_ids', 'attention_mask'] def __init__( self : Union[str, Any] , UpperCamelCase : List[Any]=None , UpperCamelCase : Any=None , UpperCamelCase : Dict="<pad>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : List[Any]="<unk>" , UpperCamelCase : Optional[Any]="<mask_2>" , UpperCamelCase : int="<mask_1>" , UpperCamelCase : str=None , UpperCamelCase : Optional[int]=103 , **UpperCamelCase : List[str] , ): '''simple docstring''' __UpperCAmelCase : Any = offset if additional_special_tokens is not None: if not isinstance(a_ , a_ ): raise TypeError( f'''additional_special_tokens should be of type {type(a_ )}, but is''' f''' {type(a_ )}''' ) __UpperCAmelCase : List[str] = ( ([mask_token_sent] + additional_special_tokens) if mask_token_sent not in additional_special_tokens and mask_token_sent is not None else additional_special_tokens ) # fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken additional_special_tokens_extended += [ f'''<unk_{i}>''' for i in range(len(a_ ) , self.offset - 1 ) ] if len(set(a_ ) ) != len(a_ ): raise ValueError( """Please make sure that the provided additional_special_tokens do not contain an incorrectly""" f''' shifted list of <unk_x> tokens. 
Found {additional_special_tokens_extended}.''' ) __UpperCAmelCase : Union[str, Any] = additional_special_tokens_extended else: __UpperCAmelCase : str = [mask_token_sent] if mask_token_sent is not None else [] additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )] super().__init__( a_ , tokenizer_file=a_ , pad_token=a_ , eos_token=a_ , unk_token=a_ , mask_token=a_ , mask_token_sent=a_ , offset=a_ , additional_special_tokens=a_ , **a_ , ) __UpperCAmelCase : Any = vocab_file __UpperCAmelCase : Dict = False if not self.vocab_file else True def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : int = set(self.all_special_ids ) # call it once instead of inside list comp all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ): raise ValueError( """There should be 3 special tokens: mask_token, pad_token, and eos_token +""" f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' ) return [1 if x in all_special_ids else 0 for x in seq] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List , UpperCamelCase : Optional[List] = None , UpperCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return self._special_token_mask(a_ ) elif token_ids_a is None: return self._special_token_mask(a_ ) + [1] else: return self._special_token_mask(token_ids_a + token_ids_a ) + [1] def lowerCamelCase__ ( self : Any , UpperCamelCase : Dict , UpperCamelCase : int=None ): '''simple docstring''' if token_ids_a is None: return token_ids_a + [self.eos_token_id] # We don't expect to process pairs, but leave the pair logic for API consistency return token_ids_a + token_ids_a + [self.eos_token_id] def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( """Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """ """tokenizer.""" ) if not os.path.isdir(a_ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Any = os.path.join( a_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ): copyfile(self.vocab_file , a_ ) return (out_vocab_file,)
359
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
320
0
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : list ) -> float: '''simple docstring''' _validate_point(lowerCAmelCase__ ) _validate_point(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(a - b ) for a, b in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) ) def lowerCamelCase ( _UpperCamelCase : list[float] ) -> None: '''simple docstring''' if point: if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ): for item in point: if not isinstance(lowerCAmelCase__ , (int, float) ): __UpperCAmelCase : str = ( """Expected a list of numbers as input, found """ f'''{type(lowerCAmelCase__ ).__name__}''' ) raise TypeError(lowerCAmelCase__ ) else: __UpperCAmelCase : Optional[int] = f'''Expected a list of numbers as input, found {type(lowerCAmelCase__ ).__name__}''' raise TypeError(lowerCAmelCase__ ) else: raise ValueError("""Missing an input""" ) def lowerCamelCase ( _UpperCamelCase : list , _UpperCamelCase : list ) -> float: '''simple docstring''' _validate_point(lowerCAmelCase__ ) _validate_point(lowerCAmelCase__ ) if len(lowerCAmelCase__ ) != len(lowerCAmelCase__ ): raise ValueError("""Both points must be in the same n-dimensional space""" ) return float(sum(abs(x - y ) for x, y in zip(lowerCAmelCase__ , lowerCAmelCase__ ) ) ) if __name__ == "__main__": import doctest doctest.testmod()
360
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : str = { 'configuration_time_series_transformer': [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TimeSeriesTransformerConfig', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : int = [ 'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'TimeSeriesTransformerForPrediction', 'TimeSeriesTransformerModel', 'TimeSeriesTransformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimeSeriesTransformerConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_time_series_transformer import ( TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TimeSeriesTransformerForPrediction, TimeSeriesTransformerModel, TimeSeriesTransformerPreTrainedModel, ) else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
361
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
0
"""simple docstring""" from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class lowerCamelCase__ ( __snake_case ): """simple docstring""" def __init__( self : int , UpperCamelCase : Optional[Any] , UpperCamelCase : int = None , UpperCamelCase : List[str] = None , UpperCamelCase : Union[str, Any] = True , UpperCamelCase : Optional[Any] = None , UpperCamelCase : Optional[int] = False , UpperCamelCase : str = None , UpperCamelCase : List[str] = True , UpperCamelCase : Tuple = "arrow" , **UpperCamelCase : Dict , ): '''simple docstring''' super().__init__( split=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , keep_in_memory=UpperCamelCase , streaming=UpperCamelCase , **UpperCamelCase , ) __UpperCAmelCase : Dict = load_from_cache_file __UpperCAmelCase : Dict = file_format __UpperCAmelCase : Union[str, Any] = Spark( df=UpperCamelCase , features=UpperCamelCase , cache_dir=UpperCamelCase , working_dir=UpperCamelCase , **UpperCamelCase , ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) __UpperCAmelCase : int = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=UpperCamelCase , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
362
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
320
0
"""simple docstring""" import logging import os from dataclasses import dataclass from typing import List, Optional, Union import tqdm from filelock import FileLock from transformers import ( BartTokenizer, BartTokenizerFast, DataProcessor, PreTrainedTokenizer, RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, is_tf_available, is_torch_available, ) UpperCAmelCase : List[str] = logging.getLogger(__name__) @dataclass(frozen=A ) class lowerCamelCase__ : """simple docstring""" __a = 42 __a = 42 __a = None __a = None __a = None @dataclass(frozen=A ) class lowerCamelCase__ : """simple docstring""" __a = 42 __a = None __a = None __a = None __a = None if is_torch_available(): import torch from torch.utils.data import Dataset class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 def __init__( self : str , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = None , UpperCamelCase : str=False , UpperCamelCase : bool = False , ): '''simple docstring''' __UpperCAmelCase : int = hans_processors[task]() __UpperCAmelCase : Optional[Any] = os.path.join( _a , """cached_{}_{}_{}_{}""".format( """dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_a ) , _a , ) , ) __UpperCAmelCase : Optional[int] = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __UpperCAmelCase ,__UpperCAmelCase : Tuple = label_list[2], label_list[1] __UpperCAmelCase : Any = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __UpperCAmelCase : List[str] = cached_features_file + """.lock""" with FileLock(_a ): if os.path.exists(_a ) and not overwrite_cache: logger.info(f'''Loading features from cached file {cached_features_file}''' ) __UpperCAmelCase : Optional[int] = torch.load(_a ) else: logger.info(f'''Creating features from dataset file at {data_dir}''' ) __UpperCAmelCase : Tuple = ( processor.get_dev_examples(_a ) if evaluate else processor.get_train_examples(_a ) ) logger.info("""Training examples: %s""" , len(_a ) ) __UpperCAmelCase : str = hans_convert_examples_to_features(_a , _a , _a , _a ) logger.info("""Saving features into cached file %s""" , _a ) torch.save(self.features , _a ) def __len__( self : Union[str, Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : int , UpperCamelCase : int ): '''simple docstring''' return self.features[i] def lowerCamelCase__ ( self : str ): '''simple docstring''' return self.label_list if is_tf_available(): import tensorflow as tf class lowerCamelCase__ : """simple docstring""" __a = 42 def __init__( self : List[str] , UpperCamelCase : str , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : str , UpperCamelCase : Optional[int] = 128 , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : bool = False , ): '''simple docstring''' __UpperCAmelCase : List[str] = hans_processors[task]() __UpperCAmelCase : int = processor.get_labels() if tokenizer.__class__ in ( RobertaTokenizer, RobertaTokenizerFast, XLMRobertaTokenizer, BartTokenizer, BartTokenizerFast, ): # HACK(label indices are swapped in RoBERTa pretrained model) __UpperCAmelCase ,__UpperCAmelCase : Any = label_list[2], label_list[1] __UpperCAmelCase : Optional[Any] = label_list __UpperCAmelCase : int = processor.get_dev_examples(_a ) if evaluate else 
processor.get_train_examples(_a ) __UpperCAmelCase : List[Any] = hans_convert_examples_to_features(_a , _a , _a , _a ) def gen(): for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ): if ex_index % 10_000 == 0: logger.info("""Writing example %d of %d""" % (ex_index, len(_a )) ) yield ( { "example_id": 0, "input_ids": ex.input_ids, "attention_mask": ex.attention_mask, "token_type_ids": ex.token_type_ids, }, ex.label, ) __UpperCAmelCase : Optional[int] = tf.data.Dataset.from_generator( _a , ( { """example_id""": tf.intaa, """input_ids""": tf.intaa, """attention_mask""": tf.intaa, """token_type_ids""": tf.intaa, }, tf.intaa, ) , ( { """example_id""": tf.TensorShape([] ), """input_ids""": tf.TensorShape([None, None] ), """attention_mask""": tf.TensorShape([None, None] ), """token_type_ids""": tf.TensorShape([None, None] ), }, tf.TensorShape([] ), ) , ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return self.dataset def __len__( self : int ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[str] , UpperCamelCase : Tuple ): '''simple docstring''' return self.features[i] def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' return self.label_list class lowerCamelCase__ ( A ): """simple docstring""" def lowerCamelCase__ ( self : str , UpperCamelCase : List[str] ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(_a , """heuristics_train_set.txt""" ) ) , """train""" ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' return self._create_examples(self._read_tsv(os.path.join(_a , """heuristics_evaluation_set.txt""" ) ) , """dev""" ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return ["contradiction", "entailment", "neutral"] def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [] for i, line in enumerate(_a ): if i == 0: continue __UpperCAmelCase : Optional[Any] = """%s-%s""" % (set_type, line[0]) __UpperCAmelCase : List[Any] = line[5] __UpperCAmelCase : str = line[6] __UpperCAmelCase : List[Any] = line[7][2:] if line[7].startswith("""ex""" ) else line[7] __UpperCAmelCase : Union[str, Any] = line[0] examples.append(InputExample(guid=_a , text_a=_a , text_b=_a , label=_a , pairID=_a ) ) return examples def lowerCamelCase ( _UpperCamelCase : List[InputExample] , _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : PreTrainedTokenizer , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : str = {label: i for i, label in enumerate(UpperCamelCase__ )} __UpperCAmelCase : str = [] for ex_index, example in tqdm.tqdm(enumerate(UpperCamelCase__ ) , desc="""convert examples to features""" ): if ex_index % 1_0_0_0_0 == 0: logger.info("""Writing example %d""" % (ex_index) ) __UpperCAmelCase : List[str] = tokenizer( example.text_a , example.text_b , add_special_tokens=UpperCamelCase__ , max_length=UpperCamelCase__ , padding="""max_length""" , truncation=UpperCamelCase__ , return_overflowing_tokens=UpperCamelCase__ , ) __UpperCAmelCase : Any = label_map[example.label] if example.label in label_map else 0 __UpperCAmelCase : Tuple = int(example.pairID ) features.append(InputFeatures(**UpperCamelCase__ , label=UpperCamelCase__ , pairID=UpperCamelCase__ ) ) for i, example in enumerate(examples[:5] ): logger.info("""*** Example ***""" ) logger.info(f'''guid: 
{example}''' ) logger.info(f'''features: {features[i]}''' ) return features UpperCAmelCase : Tuple = { 'hans': 3, } UpperCAmelCase : Tuple = { 'hans': HansProcessor, }
363
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
0
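For reference, the style-context sample in the row above implements Horner's rule for polynomial evaluation. Below is a minimal runnable sketch of the same recurrence; the descriptive names (`horner_eval`, `coefficients`, `naive`) are chosen here for illustration and do not appear in the sample.

import math


def horner_eval(coefficients, x):
    # Fold from the highest-order coefficient down:
    # ((...(c_n * x + c_{n-1}) * x + ...) * x + c_0), one multiply per step.
    result = 0.0
    for coeff in reversed(coefficients):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # same test polynomial as the sample
    naive = sum(c * 10.0**i for i, c in enumerate(poly))
    assert math.isclose(horner_eval(poly, 10.0), naive)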
"""simple docstring""" from pathlib import Path import fire def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : int ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[Any] = Path(_a ) __UpperCAmelCase : List[Any] = Path(_a ) dest_dir.mkdir(exist_ok=_a ) for path in src_dir.iterdir(): __UpperCAmelCase : int = [x.rstrip() for x in list(path.open().readlines() )][:n] __UpperCAmelCase : Dict = dest_dir.joinpath(path.name ) print(_a ) dest_path.open("""w""" ).write("""\n""".join(_a ) ) if __name__ == "__main__": fire.Fire(minify)
364
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
0
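The `code` cell of the row above is a small `fire`-driven script that truncates every file in a directory to its first n lines. A cleaned-up sketch of the same logic, standard library only; the variable names are illustrative, though `minify` itself comes from the sample's `fire.Fire(minify)` call:

from pathlib import Path


def minify(src_dir: str, dest_dir: str, n: int) -> None:
    # Write the first n (rstripped) lines of every file in src_dir
    # into a file of the same name under dest_dir.
    src, dest = Path(src_dir), Path(dest_dir)
    dest.mkdir(exist_ok=True)
    for path in src.iterdir():
        lines = [line.rstrip() for line in path.open().readlines()][:n]
        dest.joinpath(path.name).open("w").write("\n".join(lines))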
"""simple docstring""" from arguments import InitializationArguments from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, HfArgumentParser # Configuration UpperCAmelCase : Tuple = HfArgumentParser(InitializationArguments) UpperCAmelCase : str = parser.parse_args() # Load codeparrot tokenizer trained for Python code tokenization UpperCAmelCase : Optional[Any] = AutoTokenizer.from_pretrained(args.tokenizer_name) # Config: "scale_attn_by_layer_idx" and "reorder_and_upcast_attn" are Mistral stability tweaks UpperCAmelCase : int = { "vocab_size": len(tokenizer), "scale_attn_by_inverse_layer_idx": True, "reorder_and_upcast_attn": True, } # Load model config (GPT-2 large in this case) UpperCAmelCase : Tuple = AutoConfig.from_pretrained(args.config_name, **config_kwargs) # Initialize new model with config UpperCAmelCase : List[str] = AutoModelForCausalLM.from_config(config) # Save model to the hub model.save_pretrained(args.model_name, push_to_hub=args.push_to_hub)
365
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
0
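The EfficientNet conversion context above moves TensorFlow weights into PyTorch layout: conv kernels stored as (kh, kw, in, out) are permuted to (out, in, kh, kw), which is exactly the `permute(3, 2, 0, 1)` call in its `replace_params`. A self-contained sketch of that layout change; the function name and shapes are chosen here for illustration:

import numpy as np
import torch


def tf_conv_kernel_to_torch(kernel: np.ndarray) -> torch.Tensor:
    # TF stores conv kernels as (kh, kw, in_channels, out_channels);
    # PyTorch convolutions expect (out_channels, in_channels, kh, kw).
    return torch.from_numpy(kernel).permute(3, 2, 0, 1)


kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)
assert tuple(tf_conv_kernel_to_torch(kernel).shape) == (32, 16, 3, 3)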
"""simple docstring""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def lowerCamelCase ( _UpperCamelCase : Dict ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = model.config __UpperCAmelCase : Optional[int] = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 1_6, 3_2] , window_size=original_config.window_size , embed_dim=1_2_8 , ) __UpperCAmelCase : Tuple = MBartConfig( is_decoder=SCREAMING_SNAKE_CASE_ , is_encoder_decoder=SCREAMING_SNAKE_CASE_ , add_cross_attention=SCREAMING_SNAKE_CASE_ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=SCREAMING_SNAKE_CASE_ , add_final_layer_norm=SCREAMING_SNAKE_CASE_ , ) return encoder_config, decoder_config def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' if "encoder.model" in name: __UpperCAmelCase : Tuple = name.replace("""encoder.model""" , """encoder""" ) if "decoder.model" in name: __UpperCAmelCase : int = name.replace("""decoder.model""" , """decoder""" ) if "patch_embed.proj" in name: __UpperCAmelCase : Any = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __UpperCAmelCase : Optional[int] = name.replace("""patch_embed.norm""" , """embeddings.norm""" ) if name.startswith("""encoder""" ): if "layers" in name: __UpperCAmelCase : Optional[int] = """encoder.""" + name if "attn.proj" in name: __UpperCAmelCase : int = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name and "mask" not in name: __UpperCAmelCase : List[Any] = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __UpperCAmelCase : Tuple = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __UpperCAmelCase : Optional[int] = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __UpperCAmelCase : str = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __UpperCAmelCase : Any = name.replace("""mlp.fc2""" , """output.dense""" ) if name == "encoder.norm.weight": __UpperCAmelCase : List[str] = """encoder.layernorm.weight""" if name == "encoder.norm.bias": __UpperCAmelCase : List[Any] = """encoder.layernorm.bias""" return name def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Dict ) -> Union[str, Any]: '''simple docstring''' for key in orig_state_dict.copy().keys(): __UpperCAmelCase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ ) if "qkv" in key: __UpperCAmelCase : Tuple = key.split(""".""" ) __UpperCAmelCase : Optional[int] = int(key_split[3] ) __UpperCAmelCase : List[str] = int(key_split[5] ) __UpperCAmelCase : Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: __UpperCAmelCase : Union[str, Any] = val[:dim, :] __UpperCAmelCase : List[str] = val[dim : dim * 2, :] __UpperCAmelCase : str = val[-dim:, :] else: __UpperCAmelCase : Dict = val[:dim] __UpperCAmelCase : Dict = val[dim : dim * 2] __UpperCAmelCase : str = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace 
implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: __UpperCAmelCase : List[Any] = val return orig_state_dict def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str]=None , _UpperCamelCase : Union[str, Any]=False ) -> Any: '''simple docstring''' __UpperCAmelCase : int = DonutModel.from_pretrained(SCREAMING_SNAKE_CASE_ ).eval() # load HuggingFace model __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = get_configs(SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Optional[int] = DonutSwinModel(SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Optional[int] = MBartForCausalLM(SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : List[Any] = VisionEncoderDecoderModel(encoder=SCREAMING_SNAKE_CASE_ , decoder=SCREAMING_SNAKE_CASE_ ) model.eval() __UpperCAmelCase : Tuple = original_model.state_dict() __UpperCAmelCase : Optional[Any] = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) model.load_state_dict(SCREAMING_SNAKE_CASE_ ) # verify results on scanned document __UpperCAmelCase : Tuple = load_dataset("""hf-internal-testing/example-documents""" ) __UpperCAmelCase : List[Any] = dataset["""test"""][0]["""image"""].convert("""RGB""" ) __UpperCAmelCase : List[str] = XLMRobertaTokenizerFast.from_pretrained(SCREAMING_SNAKE_CASE_ , from_slow=SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : int = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) __UpperCAmelCase : Optional[Any] = DonutProcessor(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Tuple = processor(SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": __UpperCAmelCase : Optional[Any] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>""" __UpperCAmelCase : str = """When is the coffee break?""" __UpperCAmelCase : Union[str, Any] = task_prompt.replace("""{user_input}""" , SCREAMING_SNAKE_CASE_ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": __UpperCAmelCase : Any = """<s_rvlcdip>""" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: __UpperCAmelCase : List[str] = """<s_cord>""" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": __UpperCAmelCase : List[str] = """s_cord-v2>""" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": __UpperCAmelCase : Any = """<s_zhtrainticket>""" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt __UpperCAmelCase : Tuple = """hello world""" else: raise ValueError("""Model name not supported""" ) __UpperCAmelCase : Optional[int] = original_model.decoder.tokenizer(SCREAMING_SNAKE_CASE_ , add_special_tokens=SCREAMING_SNAKE_CASE_ , return_tensors="""pt""" )[ """input_ids""" ] __UpperCAmelCase : Any = original_model.encoder.model.patch_embed(SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase ,__UpperCAmelCase : Dict = model.encoder.embeddings(SCREAMING_SNAKE_CASE_ ) assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) # verify encoder hidden states __UpperCAmelCase : int = original_model.encoder(SCREAMING_SNAKE_CASE_ ) __UpperCAmelCase : Optional[Any] = model.encoder(SCREAMING_SNAKE_CASE_ ).last_hidden_state assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-2 ) # verify decoder hidden states __UpperCAmelCase : Dict = original_model(SCREAMING_SNAKE_CASE_ , 
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ).logits __UpperCAmelCase : str = model(SCREAMING_SNAKE_CASE_ , decoder_input_ids=SCREAMING_SNAKE_CASE_ ).logits assert torch.allclose(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model and processor to {pytorch_dump_folder_path}''' ) model.save_pretrained(SCREAMING_SNAKE_CASE_ ) processor.save_pretrained(SCREAMING_SNAKE_CASE_ ) if push_to_hub: model.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" ) processor.push_to_hub("""nielsr/""" + model_name.split("""/""" )[-1] , commit_message="""Update model""" ) if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='naver-clova-ix/donut-base-finetuned-docvqa', required=False, type=str, help='Name of the original model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, required=False, type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model and processor to the 🤗 hub.', ) UpperCAmelCase : List[str] = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
366
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
0
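The Donut/Swin conversion above splits fused attention projections: a `qkv` matrix of shape (3 * dim, dim) is cut into query, key and value thirds via `val[:dim]`, `val[dim : dim * 2]` and `val[-dim:]`. A standalone sketch of that split; the function name and dimension are chosen here for illustration:

import torch


def split_fused_qkv(qkv_weight: torch.Tensor, dim: int):
    # A fused qkv projection stacks query, key and value along dim 0,
    # so a (3 * dim, dim) matrix splits into three (dim, dim) matrices.
    q = qkv_weight[:dim, :]
    k = qkv_weight[dim : dim * 2, :]
    v = qkv_weight[-dim:, :]
    return q, k, v


w = torch.randn(3 * 64, 64)
q, k, v = split_fused_qkv(w, 64)
assert torch.equal(torch.cat([q, k, v], dim=0), w)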
"""simple docstring""" import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel UpperCAmelCase : int = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 4_8000, 'sample_size': 6_5536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 4_8000, 'sample_size': 6_5536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 4_8000, 'sample_size': 13_1072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 1_6000, 'sample_size': 6_5536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 1_6000, 'sample_size': 6_5536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 1_6000, 'sample_size': 6_5536, }, } def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] ) -> List[str]: '''simple docstring''' return torch.atana(UpperCamelCase__ , UpperCamelCase__ ) / math.pi * 2 def lowerCamelCase ( _UpperCamelCase : str ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Dict = torch.sin(t * math.pi / 2 ) ** 2 __UpperCAmelCase : List[str] = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(UpperCamelCase__ , UpperCamelCase__ ) class lowerCamelCase__ ( UpperCamelCase__ ): """simple docstring""" pass class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : Optional[Any] ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = DiffusionAttnUnetaD(__lowerCamelCase , n_attn_layers=4 ) __UpperCAmelCase : Optional[Any] = deepcopy(self.diffusion ) __UpperCAmelCase : str = torch.quasirandom.SobolEngine(1 , scramble=__lowerCamelCase ) def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' UpperCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } UpperCAmelCase : Union[str, Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } UpperCAmelCase : str = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } UpperCAmelCase : List[str] = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } UpperCAmelCase : Dict = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } UpperCAmelCase : List[str] = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> Optional[Any]: '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""" , RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not 
name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def lowerCamelCase ( _UpperCamelCase : str ) -> Any: '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(UpperCamelCase__ ) and not isinstance(UpperCamelCase__ , UpperCamelCase__ ): return name.replace(UpperCamelCase__ , UpperCamelCase__ ) elif name.startswith(UpperCamelCase__ ): return [name.replace(UpperCamelCase__ , UpperCamelCase__ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : List[Any]=1_3 ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : int = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""" , """time_proj""" ) __UpperCAmelCase : Dict = 0 if string.startswith("""net.3.""" ): depth += 1 __UpperCAmelCase : Tuple = string[6:] elif string.startswith("""net.""" ): __UpperCAmelCase : Tuple = string[4:] while string.startswith("""main.7.""" ): depth += 1 __UpperCAmelCase : List[str] = string[7:] if string.startswith("""main.""" ): __UpperCAmelCase : Optional[Any] = string[5:] # mid block if string[:2].isdigit(): __UpperCAmelCase : Optional[Any] = string[:2] __UpperCAmelCase : Dict = string[2:] else: __UpperCAmelCase : str = string[0] __UpperCAmelCase : Any = string[1:] if depth == max_depth: __UpperCAmelCase : List[Any] = MID_NUM_TO_LAYER[layer_num] __UpperCAmelCase : int = """mid_block""" elif depth > 0 and int(UpperCamelCase__ ) < 7: __UpperCAmelCase : int = DOWN_NUM_TO_LAYER[layer_num] __UpperCAmelCase : Optional[int] = f'''down_blocks.{depth}''' elif depth > 0 and int(UpperCamelCase__ ) > 7: __UpperCAmelCase : Tuple = UP_NUM_TO_LAYER[layer_num] __UpperCAmelCase : str = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __UpperCAmelCase : int = DEPTH_0_TO_LAYER[layer_num] __UpperCAmelCase : int = f'''up_blocks.{max_depth - 1}''' if int(UpperCamelCase__ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __UpperCAmelCase : Union[str, Any] = string_left[1:] if "resnets" in new_layer: __UpperCAmelCase : Optional[int] = convert_resconv_naming(UpperCamelCase__ ) elif "attentions" in new_layer: __UpperCAmelCase : str = convert_attn_naming(UpperCamelCase__ ) __UpperCAmelCase : Tuple = new_string_left if not isinstance(UpperCamelCase__ , UpperCamelCase__ ): __UpperCAmelCase : int = prefix + """.""" + new_layer + """.""" + string_left else: __UpperCAmelCase : List[str] = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def lowerCamelCase ( _UpperCamelCase : int ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[int] = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __UpperCAmelCase : List[Any] = rename(UpperCamelCase__ ) # check if we need to transform from Conv => Linear for attention if isinstance(UpperCamelCase__ , UpperCamelCase__ ): __UpperCAmelCase : Optional[int] = transform_conv_attns(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) else: __UpperCAmelCase : Union[str, Any] = v return new_state_dict def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[Any] ) -> Optional[int]: '''simple docstring''' if len(UpperCamelCase__ ) == 1: if len(v.shape ) == 3: # weight 
__UpperCAmelCase : Union[str, Any] = v[:, :, 0] else: # bias __UpperCAmelCase : List[Any] = v else: # qkv matrices __UpperCAmelCase : int = v.shape[0] __UpperCAmelCase : Optional[int] = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __UpperCAmelCase : Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0] else: __UpperCAmelCase : List[Any] = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def lowerCamelCase ( _UpperCamelCase : Tuple ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[str] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __UpperCAmelCase : str = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __UpperCAmelCase : Dict = download(UpperCamelCase__ ) __UpperCAmelCase : Dict = MODELS_MAP[model_name]["""sample_rate"""] __UpperCAmelCase : Union[str, Any] = MODELS_MAP[model_name]["""sample_size"""] __UpperCAmelCase : Optional[Any] = Object() __UpperCAmelCase : Optional[Any] = sample_size __UpperCAmelCase : List[Any] = sample_rate __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : Optional[Any] = UNetaDModel(sample_size=UpperCamelCase__ , sample_rate=UpperCamelCase__ ) __UpperCAmelCase : Dict = diffusers_model.state_dict() __UpperCAmelCase : Tuple = DiffusionUncond(UpperCamelCase__ ) orig_model.load_state_dict(torch.load(args.model_path , map_location=UpperCamelCase__ )["""state_dict"""] ) __UpperCAmelCase : Union[str, Any] = orig_model.diffusion_ema.eval() __UpperCAmelCase : Optional[int] = orig_model.state_dict() __UpperCAmelCase : Tuple = rename_orig_weights(UpperCamelCase__ ) __UpperCAmelCase : Union[str, Any] = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __UpperCAmelCase : int = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(UpperCamelCase__ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(UpperCamelCase__ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}''' if key == "time_proj.weight": __UpperCAmelCase : Union[str, Any] = value.squeeze() __UpperCAmelCase : str = value diffusers_model.load_state_dict(UpperCamelCase__ ) __UpperCAmelCase : List[str] = 1_0_0 __UpperCAmelCase : Optional[Any] = 3_3 __UpperCAmelCase : Optional[int] = IPNDMScheduler(num_train_timesteps=UpperCamelCase__ ) __UpperCAmelCase : Optional[Any] = torch.manual_seed(UpperCamelCase__ ) __UpperCAmelCase : int = torch.randn([1, 2, config.sample_size] , generator=UpperCamelCase__ ).to(UpperCamelCase__ ) __UpperCAmelCase : int = torch.linspace(1 , 0 , steps + 1 , device=UpperCamelCase__ )[:-1] __UpperCAmelCase : str = get_crash_schedule(UpperCamelCase__ ) __UpperCAmelCase : int = DanceDiffusionPipeline(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ ) __UpperCAmelCase : str = torch.manual_seed(3_3 ) __UpperCAmelCase : Optional[int] = pipe(num_inference_steps=UpperCamelCase__ , generator=UpperCamelCase__ ).audios __UpperCAmelCase : Tuple = sampling.iplms_sample(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {} ) __UpperCAmelCase : Any = generated.clamp(-1 , 1 ) __UpperCAmelCase : List[str] = (generated - audio).abs().sum() __UpperCAmelCase : List[Any] = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""" , UpperCamelCase__ ) print("""Diff max""" , UpperCamelCase__ ) assert diff_max < 1E-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": UpperCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') UpperCAmelCase : Tuple = parser.parse_args() main(args)
367
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
0
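The style context above hand-rolls Base64 over a 6-bit binary stream. The standard library gives a quick cross-check of the behavior it reimplements; the sample input here is illustrative:

import base64

# 13 input bytes = 104 bits; 104 % 6 == 2, so the encoder pads the bit
# stream with 4 zero bits and appends (6 - 2) // 2 == 2 '=' characters.
data = b"Hello, World!"
encoded = base64.b64encode(data)
assert encoded == b"SGVsbG8sIFdvcmxkIQ=="
assert base64.b64decode(encoded) == data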
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : str ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = sorted(zip(__lowerCamelCase , __lowerCamelCase ) , key=lambda _UpperCamelCase : x[0] / x[1] , reverse=__lowerCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : Dict = [i[0] for i in r], [i[1] for i in r] __UpperCAmelCase : List[Any] = list(accumulate(__lowerCamelCase ) ) __UpperCAmelCase : Tuple = bisect(__lowerCamelCase , __lowerCamelCase ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
368
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
0
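The fractional-knapsack sample above sorts items by value-per-weight ratio, prefix-sums the weights with `accumulate`, and uses `bisect` to find how many whole items fit before taking a fraction of the next one. A runnable sketch of the same greedy idea with the conventional names `vl`, `wt`, `w`, `n` (these appear in the flattened sample's body); note it returns 0 when not even one whole item fits, a quirk kept from the sample:

from bisect import bisect
from itertools import accumulate


def frac_knapsack(vl, wt, w, n):
    # Sort (value, weight) pairs by value-per-weight, best first.
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))  # prefix sums of the sorted weights
    k = bisect(acc, w)          # how many whole items fit in capacity w
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


# Classic example: items (60, 10), (100, 20), (120, 30), capacity 50 -> 240.
assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0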
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) UpperCAmelCase : str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} UpperCAmelCase : List[Any] = { "vocab_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-ctx_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-ctx_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase : List[Any] = { "vocab_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-question_encoder-single-nq-base": ( "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-question_encoder-multiset-base": ( "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase : Any = { "vocab_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt" ), }, "tokenizer_file": { "facebook/dpr-reader-single-nq-base": ( "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json" ), "facebook/dpr-reader-multiset-base": ( "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json" ), }, } UpperCAmelCase : List[str] = { "facebook/dpr-ctx_encoder-single-nq-base": 512, "facebook/dpr-ctx_encoder-multiset-base": 512, } UpperCAmelCase : str = { "facebook/dpr-question_encoder-single-nq-base": 512, "facebook/dpr-question_encoder-multiset-base": 512, } UpperCAmelCase : Optional[Any] = { "facebook/dpr-reader-single-nq-base": 512, "facebook/dpr-reader-multiset-base": 512, } UpperCAmelCase : Optional[int] = { "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True}, } UpperCAmelCase : List[str] = { "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True}, "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True}, } UpperCAmelCase : Optional[Any] = { "facebook/dpr-reader-single-nq-base": {"do_lower_case": True}, "facebook/dpr-reader-multiset-base": {"do_lower_case": True}, } class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP __a = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP __a = 
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION UpperCAmelCase : List[Any] = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) UpperCAmelCase : str = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) UpperCAmelCase : List[str] = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. 
This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "


@add_start_docstrings(A )
class lowerCamelCase__ :
    """simple docstring"""

    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        '''simple docstring'''
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f'''There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts.'''
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["""input_ids"""]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["""input_ids"""]
        encoded_inputs = {
            """input_ids""": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["""input_ids"""]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["""attention_mask"""] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ):
        '''simple docstring'''
        input_ids = reader_input["""input_ids"""]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        '''simple docstring'''
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''')
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'''Span is too long: {length} > {max_answer_length}''')
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(A )
class lowerCamelCase__ ( A , A ):
    """simple docstring"""

    __a = VOCAB_FILES_NAMES
    __a = READER_PRETRAINED_VOCAB_FILES_MAP
    __a = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __a = READER_PRETRAINED_INIT_CONFIGURATION
    __a = ["""input_ids""", """attention_mask"""]
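# A minimal, standalone sketch of the span-ranking idea implemented by
# `_get_best_spans` above: every (start, end) pair up to `max_answer_length`
# tokens long is scored as start_logit + end_logit, pairs are sorted by
# score, and any span contained in (or containing) an already chosen span
# is skipped. The helper name and the toy logits below are illustrative
# only; they are not part of the DPR API.
def best_spans_sketch(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue  # drop spans nested inside (or around) an already chosen span
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen


# Position 1 is the best start and position 2 the best end, so the top span is (1, 2).
assert best_spans_sketch([0.1, 0.9, 0.2], [0.1, 0.3, 0.8])[0] == (1, 2)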
369
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
0
"""simple docstring""" import random import unittest import numpy as np from diffusers import ( DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, OnnxStableDiffusionImgaImgPipeline, PNDMScheduler, ) from diffusers.utils import floats_tensor from diffusers.utils.testing_utils import ( is_onnx_available, load_image, nightly, require_onnxruntime, require_torch_gpu, ) from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin if is_onnx_available(): import onnxruntime as ort class lowerCamelCase__ ( _a , unittest.TestCase ): """simple docstring""" __a = """hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline""" def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[str]=0 ): '''simple docstring''' __UpperCAmelCase : str = floats_tensor((1, 3, 128, 128) , rng=random.Random(_a ) ) __UpperCAmelCase : List[str] = np.random.RandomState(_a ) __UpperCAmelCase : int = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 3, "strength": 0.75, "guidance_scale": 7.5, "output_type": "numpy", } return inputs def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) pipe.set_progress_bar_config(disable=_a ) __UpperCAmelCase : List[str] = self.get_dummy_inputs() __UpperCAmelCase : Optional[int] = pipe(**_a ).images __UpperCAmelCase : Any = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 128, 128, 3) __UpperCAmelCase : List[str] = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087] ) assert np.abs(image_slice - expected_slice ).max() < 1e-1 def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase : str = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=_a ) pipe.set_progress_bar_config(disable=_a ) __UpperCAmelCase : List[Any] = self.get_dummy_inputs() __UpperCAmelCase : Dict = pipe(**_a ).images __UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase : Optional[int] = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) # warmup pass to apply optimizations __UpperCAmelCase : int = pipe(**self.get_dummy_inputs() ) __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs() __UpperCAmelCase : Dict = pipe(**_a ).images __UpperCAmelCase : List[str] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase : Optional[Any] = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) 
__UpperCAmelCase : Any = EulerDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) __UpperCAmelCase : int = self.get_dummy_inputs() __UpperCAmelCase : Dict = pipe(**_a ).images __UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase : int = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase : Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) __UpperCAmelCase : List[Any] = self.get_dummy_inputs() __UpperCAmelCase : Any = pipe(**_a ).images __UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase : Optional[int] = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="""CPUExecutionProvider""" ) __UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.set_progress_bar_config(disable=_a ) __UpperCAmelCase : Optional[Any] = self.get_dummy_inputs() __UpperCAmelCase : List[str] = pipe(**_a ).images __UpperCAmelCase : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 128, 128, 3) __UpperCAmelCase : str = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1 @nightly @require_onnxruntime @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return ( "CUDAExecutionProvider", { "gpu_mem_limit": "15000000000", # 15GB "arena_extend_strategy": "kSameAsRequested", }, ) @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = ort.SessionOptions() __UpperCAmelCase : Dict = False return options def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase : List[str] = init_image.resize((768, 512) ) # using the PNDM scheduler by default __UpperCAmelCase : Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""onnx""" , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_a ) __UpperCAmelCase : int = "A fantasy landscape, trending on artstation" __UpperCAmelCase : int = np.random.RandomState(0 ) __UpperCAmelCase : List[Any] = pipe( prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=_a , output_type="""np""" , ) __UpperCAmelCase : List[Any] = output.images __UpperCAmelCase : str = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase : List[str] 
= np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : str = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/img2img/sketch-mountains-input.jpg""" ) __UpperCAmelCase : Tuple = init_image.resize((768, 512) ) __UpperCAmelCase : Tuple = LMSDiscreteScheduler.from_pretrained( """runwayml/stable-diffusion-v1-5""" , subfolder="""scheduler""" , revision="""onnx""" ) __UpperCAmelCase : Tuple = OnnxStableDiffusionImgaImgPipeline.from_pretrained( """runwayml/stable-diffusion-v1-5""" , revision="""onnx""" , scheduler=_a , safety_checker=_a , feature_extractor=_a , provider=self.gpu_provider , sess_options=self.gpu_options , ) pipe.set_progress_bar_config(disable=_a ) __UpperCAmelCase : int = "A fantasy landscape, trending on artstation" __UpperCAmelCase : List[Any] = np.random.RandomState(0 ) __UpperCAmelCase : str = pipe( prompt=_a , image=_a , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=_a , output_type="""np""" , ) __UpperCAmelCase : List[Any] = output.images __UpperCAmelCase : Dict = images[0, 255:258, 383:386, -1] assert images.shape == (1, 512, 768, 3) __UpperCAmelCase : List[str] = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431] ) # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
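# All of the tests above follow one pattern: run the ONNX img2img pipeline on
# fixed inputs (optionally after swapping the scheduler with `from_config`)
# and compare a 3x3 corner slice of the output image against recorded
# reference values. A numpy-only sketch of that slice check, with a synthetic
# image standing in for `pipe(**inputs).images` (values are illustrative):
import numpy as np

rng = np.random.RandomState(0)  # fixed seed, so the synthetic "output" is reproducible
image = rng.rand(1, 128, 128, 3)  # stand-in for the pipeline output batch
image_slice = image[0, -3:, -3:, -1].flatten()  # last 3 rows/cols of the last channel
expected_slice = image_slice.copy()  # in the real tests these values were recorded once
assert image_slice.shape == (9,)
assert np.abs(image_slice - expected_slice).max() < 1e-1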
370
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
0
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase ( _UpperCamelCase : Optional[int]=3_2 , _UpperCamelCase : Optional[int]=1_0 , _UpperCamelCase : Tuple=1_0_0 , _UpperCamelCase : Any=1_0_2_6 , _UpperCamelCase : int=True , _UpperCamelCase : str="data/tokenized_stories_train_wikitext103.jbl" , _UpperCamelCase : Optional[int]="igf_context_pairs.jbl" , ) -> List[str]: '''simple docstring''' set_seed(3 ) # generate train_data and objective_set __UpperCAmelCase : int = generate_datasets( _lowerCAmelCase , _lowerCAmelCase , number=_lowerCAmelCase , min_len=1_0_2_6 , trim=_lowerCAmelCase ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? __UpperCAmelCase : Union[str, Any] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model __UpperCAmelCase : Optional[int] = load_gpta("""gpt2""" ).to(_lowerCAmelCase ) print("""computing perplexity on objective set""" ) __UpperCAmelCase : List[Any] = compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ).item() print("""perplexity on objective set:""" , _lowerCAmelCase ) # collect igf pairs and save to file demo.jbl collect_objective_set(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Dict=1_5 , _UpperCamelCase : Optional[int]=1_2_8 , _UpperCamelCase : int=1_0_0 , _UpperCamelCase : Union[str, Any]="igf_model.pt" , ) -> Union[str, Any]: '''simple docstring''' set_seed(4_2 ) # Load pre-trained model __UpperCAmelCase : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model __UpperCAmelCase : str = SecondaryLearner(_lowerCAmelCase ) # Train secondary learner __UpperCAmelCase : List[Any] = train_secondary_learner( _lowerCAmelCase , _lowerCAmelCase , max_epochs=_lowerCAmelCase , batch_size=_lowerCAmelCase , eval_freq=1_0_0 , igf_model_path=_lowerCAmelCase , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[Any] , _UpperCamelCase : str , _UpperCamelCase : str=3_2 , _UpperCamelCase : List[Any]=1_0_0_0 , _UpperCamelCase : Dict=1_6 , _UpperCamelCase : Any=1.0 , _UpperCamelCase : Dict=recopy_gpta , _UpperCamelCase : Optional[int]=None , _UpperCamelCase : str=1_0 , _UpperCamelCase : Any="gpt2_finetuned.pt" , ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) __UpperCAmelCase : Optional[Any] = RandomSampler(_lowerCAmelCase ) __UpperCAmelCase : Any = DataLoader(_lowerCAmelCase , sampler=_lowerCAmelCase ) __UpperCAmelCase : Optional[Any] = max_steps // (len(_lowerCAmelCase )) + 1 __UpperCAmelCase : Dict = 0 __UpperCAmelCase : Union[str, Any] = torch.zeros((1, context_len) , dtype=torch.long , device=_lowerCAmelCase ) __UpperCAmelCase : 
Any = recopy_model(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) model.train() if secondary_learner is not None: secondary_learner.to(_lowerCAmelCase ) secondary_learner.eval() __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : Any = [] # Compute the performance of the transformer model at the beginning __UpperCAmelCase : Optional[int] = compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) test_perps.append(_lowerCAmelCase ) print("""Test perplexity, step""" , _lowerCAmelCase , """:""" , _lowerCAmelCase ) for epoch in range(int(_lowerCAmelCase ) ): for step, example in enumerate(_lowerCAmelCase ): torch.cuda.empty_cache() __UpperCAmelCase : List[str] = random.randint(0 , example.size(2 ) - context_len - 1 ) __UpperCAmelCase : Optional[Any] = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() __UpperCAmelCase : Optional[Any] = model(_lowerCAmelCase , labels=_lowerCAmelCase ) __UpperCAmelCase : Optional[Any] = True if secondary_learner is not None: __UpperCAmelCase : Optional[Any] = secondary_learner.forward( torch.tensor(_lowerCAmelCase , dtype=torch.long , device=_lowerCAmelCase ).unsqueeze(0 ) )[0].item() observed_qs.append(float(_lowerCAmelCase ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 1_0: __UpperCAmelCase : int = -1 if predicted_q < threshold: __UpperCAmelCase : Union[str, Any] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) __UpperCAmelCase : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. if examples == batch_size: torch.cuda.empty_cache() __UpperCAmelCase : Optional[int] = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: __UpperCAmelCase : int = compute_perplexity(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) test_perps.append(_lowerCAmelCase ) print("""Test perplexity, step""" , _lowerCAmelCase , """:""" , _lowerCAmelCase ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 6_0: break if max_steps > 0 and global_step > 6_0: break # save finetuned transformer model torch.save(model.state_dict() , _lowerCAmelCase ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase ( ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""The input data dir. 
Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=_lowerCAmelCase , type=_lowerCAmelCase , required=_lowerCAmelCase , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=_lowerCAmelCase , default=_lowerCAmelCase , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=3_2 , type=_lowerCAmelCase , help=( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=1_0_0 , type=_lowerCAmelCase , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=1_0_0 , type=_lowerCAmelCase , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_0_0_0 , type=_lowerCAmelCase , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=1_2_8 , type=_lowerCAmelCase , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=1_6 , type=_lowerCAmelCase , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=1_0 , type=_lowerCAmelCase , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=1_0_0 , type=_lowerCAmelCase , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_0_2_6 , type=_lowerCAmelCase , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=1_5 , type=_lowerCAmelCase , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=_lowerCAmelCase , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=_lowerCAmelCase , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=_lowerCAmelCase , type=_lowerCAmelCase , help="""Reset the model to the original pretrained 
GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=_lowerCAmelCase , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner __UpperCAmelCase : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner __UpperCAmelCase : int = training_secondary_learner( _lowerCAmelCase , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model __UpperCAmelCase : Dict = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(4_2 ) # Generate train and test data to train and evaluate gpt2 model __UpperCAmelCase : Dict = generate_datasets( context_len=3_2 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_0_0 , min_len=1_0_2_6 , trim=_lowerCAmelCase ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=_lowerCAmelCase , secondary_learner=_lowerCAmelCase , eval_interval=1_0 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
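# Standalone sketch of the batch-filtering rule inside `finetune` above: a
# context enters the backprop batch only while the secondary learner's
# predicted information gain stays at or above the threshold, and the
# threshold is relaxed from 1.0 to -1.0 after the tenth step so training
# cannot starve. The predictor here is a fake stand-in, not the real
# SecondaryLearner.
import random

random.seed(0)
threshold_demo = 1.0
kept = []
for demo_step in range(1, 31):
    if demo_step == 10:
        threshold_demo = -1.0  # decay the selectivity, as finetune() does at global_step == 10
    predicted_q_demo = random.uniform(-2.0, 2.0)  # fake IG prediction for this context
    if predicted_q_demo >= threshold_demo:
        kept.append(demo_step)  # this context would be appended to the batch
print(f"kept {len(kept)} of 30 contexts")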
371
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
0
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. UpperCAmelCase : int = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. UpperCAmelCase : Tuple = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. UpperCAmelCase : List[Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Optional[int] ) -> tuple[str, float]: '''simple docstring''' __UpperCAmelCase : List[Any] = len([g for position, g in enumerate(__lowerCAmelCase ) if g == main_target[position]] ) return (item, float(__lowerCAmelCase )) def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) -> tuple[str, str]: '''simple docstring''' __UpperCAmelCase : Tuple = random.randint(0 , len(__lowerCAmelCase ) - 1 ) __UpperCAmelCase : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:] __UpperCAmelCase : Any = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = list(__lowerCAmelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: __UpperCAmelCase : List[str] = random.choice(__lowerCAmelCase ) return "".join(__lowerCAmelCase ) def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] , ) -> list[str]: '''simple docstring''' __UpperCAmelCase : Dict = [] # Generate more children proportionally to the fitness score. __UpperCAmelCase : List[str] = int(parent_a[1] * 1_0_0 ) + 1 __UpperCAmelCase : Optional[int] = 1_0 if child_n >= 1_0 else child_n for _ in range(__lowerCAmelCase ): __UpperCAmelCase : List[str] = population_score[random.randint(0 , __lowerCAmelCase )][0] __UpperCAmelCase : Union[str, Any] = crossover(parent_a[0] , __lowerCAmelCase ) # Append new string to the population list. pop.append(mutate(__lowerCAmelCase , __lowerCAmelCase ) ) pop.append(mutate(__lowerCAmelCase , __lowerCAmelCase ) ) return pop def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] = True ) -> tuple[int, int, str]: '''simple docstring''' if N_POPULATION < N_SELECTED: __UpperCAmelCase : List[str] = f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(__lowerCAmelCase ) # Verify that the target contains no genes besides the ones inside genes variable. __UpperCAmelCase : List[Any] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __UpperCAmelCase : List[str] = f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(__lowerCAmelCase ) # Generate random starting population. __UpperCAmelCase : Tuple = [] for _ in range(__lowerCAmelCase ): population.append("""""".join([random.choice(__lowerCAmelCase ) for i in range(len(__lowerCAmelCase ) )] ) ) # Just some logs to know what the algorithms is doing. __UpperCAmelCase : Union[str, Any] = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__lowerCAmelCase ) # Random population created. Now it's time to evaluate. 
# Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __UpperCAmelCase : str = [evaluate(__lowerCAmelCase , __lowerCAmelCase ) for item in population] # Check if there is a matching evolution. __UpperCAmelCase : str = sorted(__lowerCAmelCase , key=lambda _UpperCamelCase : x[1] , reverse=__lowerCAmelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 1_0 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __UpperCAmelCase : Optional[Any] = population[: int(N_POPULATION / 3 )] population.clear() population.extend(__lowerCAmelCase ) # Normalize population score to be between 0 and 1. __UpperCAmelCase : Dict = [ (item, score / len(__lowerCAmelCase )) for item, score in population_score ] # This is selection for i in range(__lowerCAmelCase ): population.extend(select(population_score[int(__lowerCAmelCase )] , __lowerCAmelCase , __lowerCAmelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__lowerCAmelCase ) > N_POPULATION: break if __name__ == "__main__": UpperCAmelCase : List[Any] = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) UpperCAmelCase : List[str] = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase : Tuple = basic(target_str, genes_list) print( F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}" )
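# Small deterministic demo of the crossover/mutation primitives above
# (seeded locally so the output is reproducible; this is an illustration,
# not part of the search loop in `basic`).
random.seed(1)
demo_genes = list("AB")
demo_child_a, demo_child_b = crossover("AAAA", "BBBB")
assert sorted(demo_child_a + demo_child_b) == sorted("AAAABBBB")  # crossover only recombines
print(mutate(demo_child_a, demo_genes))  # at most one position re-drawn from the gene pool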
350
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
0
"""simple docstring""" import argparse from argparse import Namespace import torch from torch import nn from transformers import XGLMConfig, XGLMForCausalLM def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = [ 'decoder.version', 'decoder.output_projection.weight', '_float_tensor', 'decoder.embed_positions._float_tensor', ] for k in ignore_keys: state_dict.pop(_UpperCAmelCase , _UpperCAmelCase ) def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = emb.weight.shape __UpperCAmelCase : Optional[Any] = nn.Linear(_UpperCAmelCase , _UpperCAmelCase , bias=_UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = emb.weight.data return lin_layer def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = torch.load(_UpperCAmelCase , map_location="""cpu""" ) __UpperCAmelCase : str = Namespace(**checkpoint["""cfg"""]["""model"""] ) __UpperCAmelCase : Optional[int] = checkpoint['model'] remove_ignore_keys_(_UpperCAmelCase ) __UpperCAmelCase : Union[str, Any] = state_dict['decoder.embed_tokens.weight'].shape[0] __UpperCAmelCase : Any = {key.replace("""decoder""" , """model""" ): val for key, val in state_dict.items()} __UpperCAmelCase : int = XGLMConfig( vocab_size=_UpperCAmelCase , max_position_embeddings=args.max_target_positions , num_layers=args.decoder_layers , attention_heads=args.decoder_attention_heads , ffn_dim=args.decoder_ffn_embed_dim , d_model=args.decoder_embed_dim , layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="""gelu""" , scale_embedding=not args.no_scale_embedding , tie_word_embeddings=args.share_decoder_input_output_embed , ) __UpperCAmelCase : int = XGLMForCausalLM(_UpperCAmelCase ) __UpperCAmelCase : Optional[Any] = model.load_state_dict(_UpperCAmelCase , strict=_UpperCAmelCase ) print(_UpperCAmelCase ) __UpperCAmelCase : int = make_linear_from_emb(model.model.embed_tokens ) return model if __name__ == "__main__": UpperCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.') parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') UpperCAmelCase : Any = parser.parse_args() UpperCAmelCase : int = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path) model.save_pretrained(args.pytorch_dump_folder_path)
351
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaImgaImgPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase__ ( lowerCAmelCase_ , unittest.TestCase ): """simple docstring""" __a = KandinskyVaaImgaImgPipeline __a = ["""image_embeds""", """negative_image_embeds""", """image"""] __a = [ """image_embeds""", """negative_image_embeds""", """image""", ] __a = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] __a = False @property def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self.time_input_dim @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return 100 @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Dict = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __UpperCAmelCase : int = UNetaDConditionModel(**__SCREAMING_SNAKE_CASE ) return model @property def lowerCamelCase__ ( self : str ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase__ ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Tuple = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.dummy_unet __UpperCAmelCase : List[str] = self.dummy_movq __UpperCAmelCase : Optional[int] = { """num_train_timesteps""": 1_000, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } 
__UpperCAmelCase : List[str] = DDIMScheduler(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[int] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCamelCase__ ( self : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int]=0 ): '''simple docstring''' __UpperCAmelCase : str = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Any = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __SCREAMING_SNAKE_CASE ) # create init_image __UpperCAmelCase : Dict = floats_tensor((1, 3, 64, 64) , rng=random.Random(__SCREAMING_SNAKE_CASE ) ).to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __UpperCAmelCase : str = Image.fromarray(np.uinta(__SCREAMING_SNAKE_CASE ) ).convert("""RGB""" ).resize((256, 256) ) if str(__SCREAMING_SNAKE_CASE ).startswith("""mps""" ): __UpperCAmelCase : Optional[int] = torch.manual_seed(__SCREAMING_SNAKE_CASE ) else: __UpperCAmelCase : Union[str, Any] = torch.Generator(device=__SCREAMING_SNAKE_CASE ).manual_seed(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Any = { """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Any = """cpu""" __UpperCAmelCase : Optional[int] = self.get_dummy_components() __UpperCAmelCase : Dict = self.pipeline_class(**__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[int] = pipe.to(__SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[str] = pipe(**self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase : Optional[Any] = output.images __UpperCAmelCase : List[str] = pipe( **self.get_dummy_inputs(__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE , )[0] __UpperCAmelCase : int = image[0, -3:, -3:, -1] __UpperCAmelCase : Dict = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : Any = np.array( [0.6199778, 0.63984406, 0.46145785, 0.62944984, 0.5622215, 0.47306132, 0.47441456, 0.4607606, 0.48719263] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_img2img_frog.npy""" ) __UpperCAmelCase : Dict = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __UpperCAmelCase : List[str] = """A red cartoon frog, 4k""" __UpperCAmelCase : int = KandinskyVaaPriorPipeline.from_pretrained( 
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[int] = KandinskyVaaImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa ) __UpperCAmelCase : str = pipeline.to(__SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=__SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Tuple = torch.Generator(device="""cpu""" ).manual_seed(0 ) __UpperCAmelCase ,__UpperCAmelCase : List[Any] = pipe_prior( __SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __UpperCAmelCase : Tuple = pipeline( image=__SCREAMING_SNAKE_CASE , image_embeds=__SCREAMING_SNAKE_CASE , negative_image_embeds=__SCREAMING_SNAKE_CASE , generator=__SCREAMING_SNAKE_CASE , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) __UpperCAmelCase : List[str] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
352
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
0
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[Any] , _UpperCamelCase : str = 1 , _UpperCamelCase : Union[str, Any] = 1 , _UpperCamelCase : Tuple = 1.0E4 , _UpperCamelCase : Optional[Any] = False , _UpperCamelCase : Optional[int] = 1.0 , ) -> Any: '''simple docstring''' assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f'''Embedding dimension {embedding_dim} should be even''' __UpperCAmelCase : Union[str, Any] = float(embedding_dim // 2 ) __UpperCAmelCase : int = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) __UpperCAmelCase : str = min_timescale * jnp.exp(jnp.arange(lowerCamelCase__ , dtype=jnp.floataa ) * -log_timescale_increment ) __UpperCAmelCase : str = jnp.expand_dims(lowerCamelCase__ , 1 ) * jnp.expand_dims(lowerCamelCase__ , 0 ) # scale embeddings __UpperCAmelCase : Tuple = scale * emb if flip_sin_to_cos: __UpperCAmelCase : Dict = jnp.concatenate([jnp.cos(lowerCamelCase__ ), jnp.sin(lowerCamelCase__ )] , axis=1 ) else: __UpperCAmelCase : str = jnp.concatenate([jnp.sin(lowerCamelCase__ ), jnp.cos(lowerCamelCase__ )] , axis=1 ) __UpperCAmelCase : List[str] = jnp.reshape(lowerCamelCase__ , [jnp.shape(lowerCamelCase__ )[0], embedding_dim] ) return signal class lowerCamelCase__ ( nn.Module ): """simple docstring""" __a = 32 __a = jnp.floataa @nn.compact def __call__( self : Optional[int] , UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""" )(UpperCamelCase ) __UpperCAmelCase : Tuple = nn.silu(UpperCamelCase ) __UpperCAmelCase : List[str] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""" )(UpperCamelCase ) return temb class lowerCamelCase__ ( nn.Module ): """simple docstring""" __a = 32 __a = False __a = 1 @nn.compact def __call__( self : List[Any] , UpperCamelCase : Tuple ): '''simple docstring''' return get_sinusoidal_embeddings( UpperCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
353
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = len(_lowerCAmelCase ) __UpperCAmelCase : str = [] for i in range(len(_lowerCAmelCase ) - pat_len + 1 ): __UpperCAmelCase : Optional[int] = True for j in range(_lowerCAmelCase ): if s[i + j] != pattern[j]: __UpperCAmelCase : str = False break if match_found: position.append(_lowerCAmelCase ) return position if __name__ == "__main__": assert naive_pattern_search('ABCDEFG', 'DE') == [3] print(naive_pattern_search('ABAAABCDBBABCDDEBCABC', 'ABC'))
354
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING UpperCAmelCase : Dict = logging.get_logger(__name__) @add_end_docstrings(lowerCAmelCase__ ) class lowerCamelCase__ ( lowerCAmelCase__ ): """simple docstring""" def __init__( self : Optional[int] , **UpperCamelCase : Any ): '''simple docstring''' super().__init__(**_SCREAMING_SNAKE_CASE ) requires_backends(self , """vision""" ) requires_backends(self , """torch""" ) if self.framework != "pt": raise ValueError(f'''The {self.__class__} is only available in PyTorch.''' ) self.check_model_type(_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : List[str] = {} __UpperCAmelCase : Dict = {} __UpperCAmelCase : Dict = {} # preprocess args if "points_per_batch" in kwargs: __UpperCAmelCase : Any = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: __UpperCAmelCase : Optional[int] = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: __UpperCAmelCase : List[str] = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: __UpperCAmelCase : str = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: __UpperCAmelCase : int = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: __UpperCAmelCase : int = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: __UpperCAmelCase : Optional[int] = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: __UpperCAmelCase : List[str] = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: __UpperCAmelCase : str = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: __UpperCAmelCase : List[Any] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: __UpperCAmelCase : Any = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: __UpperCAmelCase : Any = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : Any , UpperCamelCase : int , *UpperCamelCase : Tuple , UpperCamelCase : int=None , UpperCamelCase : Tuple=None , **UpperCamelCase : Dict ): '''simple docstring''' return super().__call__(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , num_workers=_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : Dict=64 , UpperCamelCase : Optional[int] = 0 , UpperCamelCase : int = 512 / 1_500 , UpperCamelCase : List[Any] = 32 , UpperCamelCase : Optional[Any] = 1 , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = load_image(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : int = self.image_processor.size["""longest_edge"""] __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = self.image_processor.generate_crop_boxes( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[int] = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) with self.device_placement(): if self.framework == "pt": __UpperCAmelCase 
: int = self.get_inference_context() with inference_context(): __UpperCAmelCase : int = self._ensure_tensor_on_device(_SCREAMING_SNAKE_CASE , device=self.device ) __UpperCAmelCase : Optional[int] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""" ) ) __UpperCAmelCase : Union[str, Any] = image_embeddings __UpperCAmelCase : str = grid_points.shape[1] __UpperCAmelCase : Union[str, Any] = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """ """To return all points at once, set points_per_batch to None""" ) for i in range(0 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __UpperCAmelCase : Optional[int] = grid_points[:, i : i + points_per_batch, :, :] __UpperCAmelCase : List[str] = input_labels[:, i : i + points_per_batch] __UpperCAmelCase : List[str] = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any]=0.88 , UpperCamelCase : Optional[Any]=0.95 , UpperCamelCase : Dict=0 , UpperCamelCase : List[str]=1 , ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = model_inputs.pop("""input_boxes""" ) __UpperCAmelCase : Any = model_inputs.pop("""is_last""" ) __UpperCAmelCase : Tuple = model_inputs.pop("""original_sizes""" ).tolist() __UpperCAmelCase : Union[str, Any] = model_inputs.pop("""reshaped_input_sizes""" ).tolist() __UpperCAmelCase : int = self.model(**_SCREAMING_SNAKE_CASE ) # post processing happens here in order to avoid CPU GPU copies of ALL the masks __UpperCAmelCase : Optional[int] = model_outputs["""pred_masks"""] __UpperCAmelCase : List[str] = self.image_processor.post_process_masks( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , binarize=_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[int] = model_outputs["""iou_scores"""] __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=False , UpperCamelCase : List[Any]=False , UpperCamelCase : List[str]=0.7 , ): '''simple docstring''' __UpperCAmelCase : Any = [] __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : str = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""" ) ) all_masks.extend(model_output.pop("""masks""" ) ) all_boxes.append(model_output.pop("""boxes""" ) ) __UpperCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[Any] = torch.cat(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = self.image_processor.post_process_for_mask_generation( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[Any] = defaultdict(_SCREAMING_SNAKE_CASE ) for output in model_outputs: for k, v in output.items(): extra[k].append(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[Any] = {} if output_rle_mask: __UpperCAmelCase : Tuple = rle_mask if 
output_bboxes_mask: __UpperCAmelCase : List[Any] = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
355
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
0
"""simple docstring""" import argparse import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( CLIPTokenizer, CLIPTokenizerFast, VideoMAEImageProcessor, XCLIPConfig, XCLIPModel, XCLIPProcessor, XCLIPTextConfig, XCLIPVisionConfig, ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = XCLIPTextConfig() # derive patch size from model name __UpperCAmelCase : Dict = model_name.find("""patch""" ) __UpperCAmelCase : Dict = int(model_name[start_idx + len("""patch""" ) : start_idx + len("""patch""" ) + 2] ) __UpperCAmelCase : Optional[Any] = XCLIPVisionConfig(patch_size=_UpperCamelCase , num_frames=_UpperCamelCase ) if "large" in model_name: __UpperCAmelCase : Any = 7_6_8 __UpperCAmelCase : Any = 3_0_7_2 __UpperCAmelCase : List[Any] = 1_2 __UpperCAmelCase : Any = 1_0_2_4 __UpperCAmelCase : List[Any] = 4_0_9_6 __UpperCAmelCase : Union[str, Any] = 1_6 __UpperCAmelCase : int = 2_4 __UpperCAmelCase : Any = 7_6_8 __UpperCAmelCase : List[str] = 3_0_7_2 if model_name == "xclip-large-patch14-16-frames": __UpperCAmelCase : str = 3_3_6 __UpperCAmelCase : int = XCLIPConfig.from_text_vision_configs(_UpperCamelCase , _UpperCamelCase ) if "large" in model_name: __UpperCAmelCase : Union[str, Any] = 7_6_8 return config def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' if name == "token_embedding.weight": __UpperCAmelCase : Dict = name.replace("""token_embedding.weight""" , """text_model.embeddings.token_embedding.weight""" ) if name == "positional_embedding": __UpperCAmelCase : Dict = name.replace("""positional_embedding""" , """text_model.embeddings.position_embedding.weight""" ) if "ln_1" in name: __UpperCAmelCase : List[Any] = name.replace("""ln_1""" , """layer_norm1""" ) if "ln_2" in name: __UpperCAmelCase : int = name.replace("""ln_2""" , """layer_norm2""" ) if "c_fc" in name: __UpperCAmelCase : Optional[Any] = name.replace("""c_fc""" , """fc1""" ) if "c_proj" in name: __UpperCAmelCase : Any = name.replace("""c_proj""" , """fc2""" ) if name.startswith("""transformer.resblocks""" ): __UpperCAmelCase : Any = name.replace("""transformer.resblocks""" , """text_model.encoder.layers""" ) if "attn.out_proj" in name and "message" not in name: __UpperCAmelCase : List[Any] = name.replace("""attn.out_proj""" , """self_attn.out_proj""" ) if "ln_final" in name: __UpperCAmelCase : Optional[Any] = name.replace("""ln_final""" , """text_model.final_layer_norm""" ) # visual encoder if name == "visual.class_embedding": __UpperCAmelCase : Dict = name.replace("""visual.class_embedding""" , """vision_model.embeddings.class_embedding""" ) if name == "visual.positional_embedding": __UpperCAmelCase : Tuple = name.replace("""visual.positional_embedding""" , """vision_model.embeddings.position_embedding.weight""" ) if name.startswith("""visual.transformer.resblocks""" ): __UpperCAmelCase : Tuple = name.replace("""visual.transformer.resblocks""" , """vision_model.encoder.layers""" ) if "visual.conv1" in name: __UpperCAmelCase : Optional[Any] = name.replace("""visual.conv1""" , """vision_model.embeddings.patch_embedding""" ) if "visual.ln_pre" in name: __UpperCAmelCase : Any = name.replace("""visual.ln_pre""" , """vision_model.pre_layernorm""" ) if "visual.ln_post" in name: __UpperCAmelCase : Union[str, Any] = name.replace("""visual.ln_post""" , """vision_model.post_layernorm""" ) if "visual.proj" in name: __UpperCAmelCase : Optional[Any] = 
name.replace("""visual.proj""" , """visual_projection.weight""" ) if "text_projection" in name: __UpperCAmelCase : List[str] = name.replace("""text_projection""" , """text_projection.weight""" ) # things on top if "prompts_visual_proj" in name: __UpperCAmelCase : Union[str, Any] = name.replace("""prompts_visual_proj""" , """prompts_visual_projection""" ) if "prompts_visual_ln" in name: __UpperCAmelCase : Optional[Any] = name.replace("""prompts_visual_ln""" , """prompts_visual_layernorm""" ) # mit if name == "mit.positional_embedding": __UpperCAmelCase : Dict = name.replace("""positional""" , """position""" ) if name.startswith("""mit.resblocks""" ): __UpperCAmelCase : Tuple = name.replace("""mit.resblocks""" , """mit.encoder.layers""" ) # prompts generator if name.startswith("""prompts_generator.norm""" ): __UpperCAmelCase : Optional[Any] = name.replace("""prompts_generator.norm""" , """prompts_generator.layernorm""" ) return name def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict ) -> str: '''simple docstring''' for key in orig_state_dict.copy().keys(): __UpperCAmelCase : int = orig_state_dict.pop(_UpperCamelCase ) if "attn.in_proj" in key: __UpperCAmelCase : Optional[Any] = key.split(""".""" ) if key.startswith("""visual""" ): __UpperCAmelCase : Optional[Any] = key_split[3] __UpperCAmelCase : str = config.vision_config.hidden_size if "message_attn" in key: if "weight" in key: __UpperCAmelCase : Dict = val[ :dim, : ] __UpperCAmelCase : List[str] = val[ dim : dim * 2, : ] __UpperCAmelCase : List[Any] = val[ -dim:, : ] else: __UpperCAmelCase : Union[str, Any] = val[ :dim ] __UpperCAmelCase : Union[str, Any] = val[ dim : dim * 2 ] __UpperCAmelCase : str = val[ -dim: ] else: if "weight" in key: __UpperCAmelCase : List[Any] = val[ :dim, : ] __UpperCAmelCase : Dict = val[ dim : dim * 2, : ] __UpperCAmelCase : List[Any] = val[ -dim:, : ] else: __UpperCAmelCase : Any = val[:dim] __UpperCAmelCase : Dict = val[ dim : dim * 2 ] __UpperCAmelCase : List[str] = val[-dim:] elif key.startswith("""mit""" ): __UpperCAmelCase : int = key_split[2] __UpperCAmelCase : Optional[Any] = config.vision_config.mit_hidden_size if "weight" in key: __UpperCAmelCase : Optional[Any] = val[:dim, :] __UpperCAmelCase : Optional[Any] = val[dim : dim * 2, :] __UpperCAmelCase : int = val[-dim:, :] else: __UpperCAmelCase : str = val[:dim] __UpperCAmelCase : Optional[int] = val[dim : dim * 2] __UpperCAmelCase : Optional[int] = val[-dim:] else: __UpperCAmelCase : int = key_split[2] __UpperCAmelCase : List[Any] = config.text_config.hidden_size if "weight" in key: __UpperCAmelCase : Tuple = val[:dim, :] __UpperCAmelCase : Optional[int] = val[ dim : dim * 2, : ] __UpperCAmelCase : Optional[int] = val[-dim:, :] else: __UpperCAmelCase : List[Any] = val[:dim] __UpperCAmelCase : List[Any] = val[ dim : dim * 2 ] __UpperCAmelCase : int = val[-dim:] else: __UpperCAmelCase : Optional[int] = rename_key(_UpperCamelCase ) if new_key_name in ["visual_projection.weight", "text_projection.weight"]: __UpperCAmelCase : Optional[Any] = val.T __UpperCAmelCase : List[Any] = val return orig_state_dict def lowerCamelCase ( _UpperCamelCase : Dict ) -> str: '''simple docstring''' if num_frames == 8: __UpperCAmelCase : List[Any] = 'eating_spaghetti_8_frames.npy' elif num_frames == 1_6: __UpperCAmelCase : List[Any] = 'eating_spaghetti.npy' elif num_frames == 3_2: __UpperCAmelCase : Optional[Any] = 'eating_spaghetti_32_frames.npy' __UpperCAmelCase : str = hf_hub_download( repo_id="""hf-internal-testing/spaghetti-video""" , 
filename=_UpperCamelCase , repo_type="""dataset""" , ) __UpperCAmelCase : Optional[Any] = np.load(_UpperCamelCase ) return list(_UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Tuple=None , _UpperCamelCase : Union[str, Any]=False ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = { # fully supervised kinetics-400 checkpoints 'xclip-base-patch32': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth', 'xclip-base-patch32-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth' ), 'xclip-base-patch16': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth', 'xclip-base-patch16-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth' ), 'xclip-large-patch14': 'https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&amp;export=download&amp;confirm=t&amp;uuid=b26caedc-88e2-473e-830a-9d158b653cdb', 'xclip-large-patch14-16-frames': 'https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&amp;export=download&amp;confirm=t&amp;uuid=538fa810-e671-4050-b385-9a623f89804f', # fully supervised kinetics-600 checkpoints 'xclip-base-patch16-kinetics-600': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth' ), 'xclip-base-patch16-kinetics-600-16-frames': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth' ), 'xclip-large-patch14-kinetics-600': 'https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&amp;export=download&amp;confirm=t&amp;uuid=141d4977-4a65-44ae-864f-4b0c19f838be', # few shot 'xclip-base-patch16-hmdb-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth' ), 'xclip-base-patch16-hmdb-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth' ), 'xclip-base-patch16-hmdb-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth' ), 'xclip-base-patch16-hmdb-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth' ), 'xclip-base-patch16-ucf-2-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth' ), 'xclip-base-patch16-ucf-4-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth' ), 'xclip-base-patch16-ucf-8-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth' ), 'xclip-base-patch16-ucf-16-shot': ( 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth' ), # zero shot 'xclip-base-patch16-zero-shot': 'https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth', } __UpperCAmelCase : Any = model_to_url[model_name] __UpperCAmelCase : Tuple = 8 if "16-frames" in model_name: __UpperCAmelCase : str = 1_6 elif "shot" in model_name: __UpperCAmelCase : int = 3_2 __UpperCAmelCase : Tuple = get_xclip_config(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : List[str] = XCLIPModel(_UpperCamelCase ) model.eval() if "drive" in checkpoint_url: __UpperCAmelCase : Optional[Any] = 'pytorch_model.bin' gdown.cached_download(_UpperCamelCase , _UpperCamelCase , quiet=_UpperCamelCase ) __UpperCAmelCase : Optional[int] = torch.load(_UpperCamelCase , map_location="""cpu""" )['model'] else: __UpperCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(_UpperCamelCase )['model'] __UpperCAmelCase : int = convert_state_dict(_UpperCamelCase , 
_UpperCamelCase ) __UpperCAmelCase : int = XCLIPModel(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = model.load_state_dict(_UpperCamelCase , strict=_UpperCamelCase ) assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"] model.eval() __UpperCAmelCase : Optional[Any] = 3_3_6 if model_name == 'xclip-large-patch14-16-frames' else 2_2_4 __UpperCAmelCase : List[str] = VideoMAEImageProcessor(size=_UpperCamelCase ) __UpperCAmelCase : Optional[int] = CLIPTokenizer.from_pretrained("""openai/clip-vit-base-patch32""" ) __UpperCAmelCase : List[str] = CLIPTokenizerFast.from_pretrained("""openai/clip-vit-base-patch32""" ) __UpperCAmelCase : Optional[int] = XCLIPProcessor(image_processor=_UpperCamelCase , tokenizer=_UpperCamelCase ) __UpperCAmelCase : str = prepare_video(_UpperCamelCase ) __UpperCAmelCase : Any = processor( text=["""playing sports""", """eating spaghetti""", """go shopping"""] , videos=_UpperCamelCase , return_tensors="""pt""" , padding=_UpperCamelCase ) print("""Shape of pixel values:""" , inputs.pixel_values.shape ) with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(**_UpperCamelCase ) # Verify outputs __UpperCAmelCase : int = outputs.logits_per_video __UpperCAmelCase : Optional[int] = logits_per_video.softmax(dim=1 ) print("""Probs:""" , _UpperCamelCase ) # kinetics-400 if model_name == "xclip-base-patch32": __UpperCAmelCase : Union[str, Any] = torch.tensor([[0.0_019, 0.9_951, 0.0_030]] ) elif model_name == "xclip-base-patch32-16-frames": __UpperCAmelCase : List[Any] = torch.tensor([[7.0_999E-04, 9.9_883E-01, 4.5_580E-04]] ) elif model_name == "xclip-base-patch16": __UpperCAmelCase : List[Any] = torch.tensor([[0.0_083, 0.9_681, 0.0_236]] ) elif model_name == "xclip-base-patch16-16-frames": __UpperCAmelCase : int = torch.tensor([[7.6_937E-04, 9.9_728E-01, 1.9_473E-03]] ) elif model_name == "xclip-large-patch14": __UpperCAmelCase : List[str] = torch.tensor([[0.0_062, 0.9_864, 0.0_075]] ) elif model_name == "xclip-large-patch14-16-frames": __UpperCAmelCase : Optional[int] = torch.tensor([[3.3_877E-04, 9.9_937E-01, 2.8_888E-04]] ) # kinetics-600 elif model_name == "xclip-base-patch16-kinetics-600": __UpperCAmelCase : int = torch.tensor([[0.0_555, 0.8_914, 0.0_531]] ) elif model_name == "xclip-base-patch16-kinetics-600-16-frames": __UpperCAmelCase : Any = torch.tensor([[3.8_554E-04, 9.9_929E-01, 3.2_754E-04]] ) elif model_name == "xclip-large-patch14-kinetics-600": __UpperCAmelCase : Dict = torch.tensor([[0.0_036, 0.9_920, 0.0_045]] ) # few shot elif model_name == "xclip-base-patch16-hmdb-2-shot": __UpperCAmelCase : Tuple = torch.tensor([[7.1_890E-06, 9.9_994E-01, 5.6_559E-05]] ) elif model_name == "xclip-base-patch16-hmdb-4-shot": __UpperCAmelCase : List[Any] = torch.tensor([[1.0_320E-05, 9.9_993E-01, 6.2_435E-05]] ) elif model_name == "xclip-base-patch16-hmdb-8-shot": __UpperCAmelCase : Optional[Any] = torch.tensor([[4.1_377E-06, 9.9_990E-01, 9.8_386E-05]] ) elif model_name == "xclip-base-patch16-hmdb-16-shot": __UpperCAmelCase : int = torch.tensor([[4.1_347E-05, 9.9_962E-01, 3.3_411E-04]] ) elif model_name == "xclip-base-patch16-ucf-2-shot": __UpperCAmelCase : List[str] = torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] ) elif model_name == "xclip-base-patch16-ucf-4-shot": __UpperCAmelCase : List[str] = torch.tensor([[8.5_857E-05, 9.9_928E-01, 6.3_291E-04]] ) elif model_name == "xclip-base-patch16-ucf-8-shot": __UpperCAmelCase : List[Any] = torch.tensor([[0.0_027, 0.9_904, 0.0_070]] ) elif model_name == 
"xclip-base-patch16-ucf-16-shot": __UpperCAmelCase : List[Any] = torch.tensor([[9.8_219E-04, 9.9_593E-01, 3.0_863E-03]] ) # zero shot elif model_name == "xclip-base-patch16-zero-shot": __UpperCAmelCase : str = torch.tensor([[3.5_082E-04, 9.9_785E-01, 1.7_966E-03]] ) else: raise ValueError(f'''Model name {model_name} not supported''' ) assert torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCamelCase ) if push_to_hub: print("""Pushing model, processor and slow tokenizer files to the hub...""" ) model.push_to_hub(_UpperCamelCase , organization="""nielsr""" ) processor.push_to_hub(_UpperCamelCase , organization="""nielsr""" ) slow_tokenizer.push_to_hub(_UpperCamelCase , organization="""nielsr""" ) if __name__ == "__main__": UpperCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='xclip-base-patch32', type=str, help='Name of the model.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) UpperCAmelCase : Any = parser.parse_args() convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
356
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" import os import socket from contextlib import contextmanager import torch from ..commands.config.default import write_basic_config # noqa: F401 from ..state import PartialState from .dataclasses import DistributedType from .imports import is_deepspeed_available, is_tpu_available from .transformer_engine import convert_model from .versions import is_torch_version if is_deepspeed_available(): from deepspeed import DeepSpeedEngine if is_tpu_available(check_device=False): import torch_xla.core.xla_model as xm def lowerCamelCase ( _UpperCamelCase : str ) -> Tuple: '''simple docstring''' if is_torch_version("""<""" , """2.0.0""" ) or not hasattr(_UpperCamelCase , """_dynamo""" ): return False return isinstance(_UpperCamelCase , torch._dynamo.eval_frame.OptimizedModule ) def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : bool = True ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel) __UpperCAmelCase : Optional[Any] = is_compiled_module(_UpperCamelCase ) if is_compiled: __UpperCAmelCase : Optional[Any] = model __UpperCAmelCase : Union[str, Any] = model._orig_mod if is_deepspeed_available(): options += (DeepSpeedEngine,) while isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : List[str] = model.module if not keep_fpaa_wrapper: __UpperCAmelCase : List[Any] = getattr(_UpperCamelCase , """forward""" ) __UpperCAmelCase : Optional[Any] = model.__dict__.pop("""_original_forward""" , _UpperCamelCase ) if original_forward is not None: while hasattr(_UpperCamelCase , """__wrapped__""" ): __UpperCAmelCase : int = forward.__wrapped__ if forward == original_forward: break __UpperCAmelCase : Any = forward if getattr(_UpperCamelCase , """_converted_to_transformer_engine""" , _UpperCamelCase ): convert_model(_UpperCamelCase , to_transformer_engine=_UpperCamelCase ) if is_compiled: __UpperCAmelCase : Dict = model __UpperCAmelCase : int = compiled_model return model def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' PartialState().wait_for_everyone() def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' if PartialState().distributed_type == DistributedType.TPU: xm.save(_UpperCamelCase , _UpperCamelCase ) elif PartialState().local_process_index == 0: torch.save(_UpperCamelCase , _UpperCamelCase ) @contextmanager def lowerCamelCase ( **_UpperCamelCase : Optional[Any] ) -> Tuple: '''simple docstring''' for key, value in kwargs.items(): __UpperCAmelCase : int = str(_UpperCamelCase ) yield for key in kwargs: if key.upper() in os.environ: del os.environ[key.upper()] def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> Union[str, Any]: '''simple docstring''' if not hasattr(_UpperCamelCase , """__qualname__""" ) and not hasattr(_UpperCamelCase , """__name__""" ): __UpperCAmelCase : int = getattr(_UpperCamelCase , """__class__""" , _UpperCamelCase ) if hasattr(_UpperCamelCase , """__qualname__""" ): return obj.__qualname__ if hasattr(_UpperCamelCase , """__name__""" ): return obj.__name__ return str(_UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any ) -> List[str]: '''simple docstring''' for key, value in source.items(): if isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Optional[int] = destination.setdefault(_UpperCamelCase , {} ) merge_dicts(_UpperCamelCase , _UpperCamelCase ) else: __UpperCAmelCase : Any = value return 
destination def lowerCamelCase ( _UpperCamelCase : int = None ) -> List[Any]: '''simple docstring''' if port is None: __UpperCAmelCase : Tuple = 2_9_5_0_0 with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s: return s.connect_ex(("""localhost""", port) ) == 0
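A brief usage sketch for patch_environment above; the variable names are arbitrary and the final assertion assumes the variable was unset beforehand:

with patch_environment(master_addr="127.0.0.1", master_port=29501):
    assert os.environ["MASTER_ADDR"] == "127.0.0.1"  # keys are upper-cased
assert "MASTER_ADDR" not in os.environ  # restored after the block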
357
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
0
"""simple docstring""" from __future__ import annotations import math def lowerCamelCase ( _UpperCamelCase : Tuple ) -> Union[str, Any]: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(A__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True UpperCAmelCase : Union[str, Any] = [num for num in range(3, 10_0001, 2) if not is_prime(num)] def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Any: '''simple docstring''' if not isinstance(A__ , A__ ): raise ValueError("""n must be an integer""" ) if n <= 0: raise ValueError("""n must be >= 0""" ) __UpperCAmelCase : str = [] for num in range(len(A__ ) ): __UpperCAmelCase : Optional[int] = 0 while 2 * i * i <= odd_composites[num]: __UpperCAmelCase : Dict = odd_composites[num] - 2 * i * i if is_prime(A__ ): break i += 1 else: list_nums.append(odd_composites[num] ) if len(A__ ) == n: return list_nums return [] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return compute_nums(1 )[0] if __name__ == "__main__": print(F"{solution() = }")
358
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
0
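The first sample in the row above implements a 6k +/- 1 primality test, but the corpus's name obfuscation leaves it non-runnable as printed. A minimal, runnable restatement of the same algorithm (the function and variable names here are my own):

```python
# Trial division restricted to 6k +/- 1 candidates: every prime > 3
# has that form, so only those divisors need checking.
import math

def is_prime(number: int) -> bool:
    if 1 < number < 4:  # 2 and 3 are prime
        return True
    if number < 2 or number % 2 == 0 or number % 3 == 0:
        return False
    for i in range(5, int(math.sqrt(number)) + 1, 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True

# Usage: collect the odd composites below 50, as the sample's list
# comprehension does up to 100001.
print([n for n in range(3, 50, 2) if not is_prime(n)])
# [9, 15, 21, 25, 27, 33, 35, 39, 45, 49]
```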
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Tuple ) -> bool: '''simple docstring''' if num < 0: return False __UpperCAmelCase : int = num __UpperCAmelCase : int = 0 while num > 0: __UpperCAmelCase : List[str] = rev_num * 1_0 + (num % 1_0) num //= 1_0 return num_copy == rev_num if __name__ == "__main__": import doctest doctest.testmod()
359
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
320
0
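The palindrome sample above reverses the number arithmetically rather than via string conversion. A minimal runnable sketch of the same idea (names are mine):

```python
# Build the digit-reversed value with repeated divmod-by-10 steps,
# then compare it against the original number.
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    original, reversed_num = num, 0
    while num > 0:
        reversed_num = reversed_num * 10 + (num % 10)
        num //= 10
    return original == reversed_num

print(is_palindrome(12321), is_palindrome(123))  # True False
```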
"""simple docstring""" from collections.abc import Callable import numpy as np def lowerCamelCase ( _UpperCamelCase : Callable , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float , _UpperCamelCase : float ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : int = int(np.ceil((x_end - xa) / step_size ) ) __UpperCAmelCase : Optional[Any] = np.zeros((n + 1,) ) __UpperCAmelCase : Optional[int] = ya __UpperCAmelCase : str = xa for k in range(a__ ): __UpperCAmelCase : int = y[k] + step_size * ode_func(a__ , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
360
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
0
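The Euler-method sample above advances the solution one step at a time with y_{k+1} = y_k + h * f(x_k, y_k). A minimal runnable sketch with the obfuscated names spelled out, applied to dy/dx = y with y(0) = 1, whose exact solution is e^x:

```python
import numpy as np

def explicit_euler(ode_func, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    # Number of steps needed to cover [x0, x_end] at the given step size.
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        # One forward-Euler update per step.
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
print(y[-1])  # ~2.70, converging to e as step_size shrinks
```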
"""simple docstring""" from __future__ import annotations UpperCAmelCase : str = list[list[int]] # assigning initial values to the grid UpperCAmelCase : Tuple = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution UpperCAmelCase : str = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def lowerCamelCase ( _UpperCamelCase : Matrix , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> str: '''simple docstring''' for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def lowerCamelCase ( _UpperCamelCase : Matrix ) -> Dict: '''simple docstring''' for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def lowerCamelCase ( _UpperCamelCase : Matrix ) -> Union[str, Any]: '''simple docstring''' if location := find_empty_location(_UpperCamelCase ): __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 1_0 ): if is_safe(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : List[Any] = digit if sudoku(_UpperCamelCase ) is not None: return grid __UpperCAmelCase : Optional[Any] = 0 return None def lowerCamelCase ( _UpperCamelCase : Matrix ) -> Optional[Any]: '''simple docstring''' for row in grid: for cell in row: print(_UpperCamelCase , end=""" """ ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('\nExample grid:\n' + '=' * 20) print_solution(example_grid) print('\nExample grid solution:') UpperCAmelCase : Any = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('Cannot find a solution.')
361
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
0
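The second sample in the row above solves the minimum-difference partition problem with a subset-sum table: dp[i][j] is True when some subset of the first i items sums to j, and the best split minimises s - 2*j over reachable j up to s // 2. A minimal sketch using the textbook recurrence (slightly tidier than the sample's transcription; names are mine):

```python
def find_min_diff(arr: list[int]) -> int:
    n, s = len(arr), sum(arr)
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            # Either skip item i-1, or take it if it fits into sum j.
            dp[i][j] = dp[i - 1][j] or (arr[i - 1] <= j and dp[i - 1][j - arr[i - 1]])
    # Scan reachable half-sums from the middle down; the first hit is optimal.
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
    return s

print(find_min_diff([1, 6, 11, 5]))  # 1  -> {5, 6} + {1} vs {11}
```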
"""simple docstring""" import flax.linen as nn import jax import jax.numpy as jnp class lowerCamelCase__ ( nn.Module ): """simple docstring""" __a = 42 __a = jnp.floataa def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : Optional[Any] , UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : int = hidden_states.shape __UpperCAmelCase : str = jax.image.resize( _a , shape=(batch, height * 2, width * 2, channels) , method="""nearest""" , ) __UpperCAmelCase : Dict = self.conv(_a ) return hidden_states class lowerCamelCase__ ( nn.Module ): """simple docstring""" __a = 42 __a = jnp.floataa def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self : str , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : int = self.conv(_a ) return hidden_states class lowerCamelCase__ ( nn.Module ): """simple docstring""" __a = 42 __a = None __a = 0.0 __a = None __a = jnp.floataa def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Any = self.in_channels if self.out_channels is None else self.out_channels __UpperCAmelCase : List[Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : Any = nn.Conv( _a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Union[str, Any] = nn.Dense(_a , dtype=self.dtype ) __UpperCAmelCase : str = nn.GroupNorm(num_groups=32 , epsilon=1e-5 ) __UpperCAmelCase : List[str] = nn.Dropout(self.dropout_prob ) __UpperCAmelCase : List[str] = nn.Conv( _a , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) __UpperCAmelCase : Dict = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut __UpperCAmelCase : Any = None if use_nin_shortcut: __UpperCAmelCase : Union[str, Any] = nn.Conv( _a , kernel_size=(1, 1) , strides=(1, 1) , padding="""VALID""" , dtype=self.dtype , ) def __call__( self : List[Any] , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Dict=True ): '''simple docstring''' __UpperCAmelCase : Optional[int] = hidden_states __UpperCAmelCase : Dict = self.norma(_a ) __UpperCAmelCase : str = nn.swish(_a ) __UpperCAmelCase : List[str] = self.conva(_a ) __UpperCAmelCase : List[Any] = self.time_emb_proj(nn.swish(_a ) ) __UpperCAmelCase : Any = jnp.expand_dims(jnp.expand_dims(_a , 1 ) , 1 ) __UpperCAmelCase : Dict = hidden_states + temb __UpperCAmelCase : int = self.norma(_a ) __UpperCAmelCase : List[Any] = nn.swish(_a ) __UpperCAmelCase : Tuple = self.dropout(_a , _a ) __UpperCAmelCase : Optional[int] = self.conva(_a ) if self.conv_shortcut is not None: __UpperCAmelCase : str = self.conv_shortcut(_a ) return hidden_states + residual
362
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
320
0
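The Flax sample above defines an upsampling block as a nearest-neighbour resize followed by a stride-1 3x3 convolution. A minimal self-contained sketch of that block, assuming NHWC inputs as in the sample (module and variable names are my own, not the library's):

```python
import jax
import jax.numpy as jnp
import flax.linen as nn

class Upsample2D(nn.Module):
    out_channels: int

    @nn.compact
    def __call__(self, x):
        batch, height, width, channels = x.shape
        # Double the spatial resolution with nearest-neighbour resizing...
        x = jax.image.resize(x, (batch, height * 2, width * 2, channels), method="nearest")
        # ...then refine with a padded 3x3 convolution that keeps the size.
        return nn.Conv(self.out_channels, kernel_size=(3, 3), padding=((1, 1), (1, 1)))(x)

x = jnp.ones((1, 8, 8, 4))  # NHWC
params = Upsample2D(out_channels=4).init(jax.random.PRNGKey(0), x)
y = Upsample2D(out_channels=4).apply(params, x)
print(y.shape)  # (1, 16, 16, 4)
```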
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : str = { 'configuration_blenderbot_small': [ 'BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BlenderbotSmallConfig', 'BlenderbotSmallOnnxConfig', ], 'tokenization_blenderbot_small': ['BlenderbotSmallTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = ['BlenderbotSmallTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST', 'BlenderbotSmallForCausalLM', 'BlenderbotSmallForConditionalGeneration', 'BlenderbotSmallModel', 'BlenderbotSmallPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'TFBlenderbotSmallForConditionalGeneration', 'TFBlenderbotSmallModel', 'TFBlenderbotSmallPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Union[str, Any] = [ 'FlaxBlenderbotSmallForConditionalGeneration', 'FlaxBlenderbotSmallModel', 'FlaxBlenderbotSmallPreTrainedModel', ] if TYPE_CHECKING: from .configuration_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallOnnxConfig, ) from .tokenization_blenderbot_small import BlenderbotSmallTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, BlenderbotSmallPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_blenderbot_small import ( TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel, TFBlenderbotSmallPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, FlaxBlenderbotSmallPreTrainedModel, ) else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
363
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
0
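The polynomial sample above contrasts naive term-by-term evaluation with Horner's rule, which needs only one multiply-add per coefficient. A minimal restatement (names are mine):

```python
from collections.abc import Sequence

def horner(poly: Sequence[float], x: float) -> float:
    # Fold from the highest-order coefficient down:
    # ((c_n * x + c_{n-1}) * x + ...) * x + c_0
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result

poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # 7x^4 + 9.3x^3 + 5x^2
print(horner(poly, 10.0))          # 79800.0
```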
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCAmelCase : Union[str, Any] = ['small', 'medium', 'large'] UpperCAmelCase : Optional[Any] = 'lm_head.decoder.weight' UpperCAmelCase : List[str] = 'lm_head.weight' def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Tuple ) -> Dict: '''simple docstring''' __UpperCAmelCase : Optional[Any] = torch.load(__snake_case ) __UpperCAmelCase : Dict = d.pop(__snake_case ) os.makedirs(__snake_case , exist_ok=__snake_case ) torch.save(__snake_case , os.path.join(__snake_case , __snake_case ) ) if __name__ == "__main__": UpperCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) UpperCAmelCase : Tuple = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCAmelCase : Optional[Any] = os.path.join(args.dialogpt_path, F"{MODEL}_ft.pkl") UpperCAmelCase : str = F"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
364
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
0
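The DialoGPT conversion sample above (repeated in the next row) boils down to one dictionary operation: pop the old lm-head key from a loaded checkpoint and re-save it under the new name. A minimal sketch, with placeholder paths rather than the script's real arguments:

```python
import torch

def rename_lm_head(checkpoint_path: str, output_path: str) -> None:
    # Load the raw state dict, move the weight to the key the current
    # model class expects, and write the result back out.
    state_dict = torch.load(checkpoint_path)
    state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")
    torch.save(state_dict, output_path)
```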
"""simple docstring""" import argparse import os import torch from transformers.utils import WEIGHTS_NAME UpperCAmelCase : Tuple = ['small', 'medium', 'large'] UpperCAmelCase : Dict = 'lm_head.decoder.weight' UpperCAmelCase : str = 'lm_head.weight' def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> int: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = torch.load(_a ) __UpperCAmelCase : Tuple = d.pop(_a ) os.makedirs(_a , exist_ok=_a ) torch.save(_a , os.path.join(_a , _a ) ) if __name__ == "__main__": UpperCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--dialogpt_path', default='.', type=str) UpperCAmelCase : Optional[int] = parser.parse_args() for MODEL in DIALOGPT_MODELS: UpperCAmelCase : Dict = os.path.join(args.dialogpt_path, F"{MODEL}_ft.pkl") UpperCAmelCase : Any = F"./DialoGPT-{MODEL}" convert_dialogpt_checkpoint( checkpoint_path, pytorch_dump_folder_path, )
365
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
0
"""simple docstring""" import math from numpy import inf from scipy.integrate import quad def lowerCamelCase ( _UpperCamelCase : Tuple ) -> float: '''simple docstring''' if num <= 0: raise ValueError("""math domain error""" ) return quad(SCREAMING_SNAKE_CASE_ , 0 , SCREAMING_SNAKE_CASE_ , args=(SCREAMING_SNAKE_CASE_) )[0] def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> float: '''simple docstring''' return math.pow(SCREAMING_SNAKE_CASE_ , z - 1 ) * math.exp(-x ) if __name__ == "__main__": from doctest import testmod testmod()
366
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
0
"""simple docstring""" import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL UpperCAmelCase : List[Any] = logging.get_logger(__name__) def lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : Union[int, Iterable[int]] , _UpperCamelCase : bool , _UpperCamelCase : int ) -> List[str]: '''simple docstring''' def constraint_to_multiple_of(_UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : Optional[Any]=0 , _UpperCamelCase : Any=None ): __UpperCAmelCase : str = round(val / multiple ) * multiple if max_val is not None and x > max_val: __UpperCAmelCase : Optional[int] = math.floor(val / multiple ) * multiple if x < min_val: __UpperCAmelCase : Union[str, Any] = math.ceil(val / multiple ) * multiple return x __UpperCAmelCase : int = (output_size, output_size) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else output_size __UpperCAmelCase ,__UpperCAmelCase : Any = get_image_size(SCREAMING_SNAKE_CASE__ ) __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = output_size # determine new height and width __UpperCAmelCase : Any = output_height / input_height __UpperCAmelCase : Optional[Any] = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width __UpperCAmelCase : str = scale_width else: # fit height __UpperCAmelCase : Dict = scale_height __UpperCAmelCase : Optional[Any] = constraint_to_multiple_of(scale_height * input_height , multiple=SCREAMING_SNAKE_CASE__ ) __UpperCAmelCase : Union[str, Any] = constraint_to_multiple_of(scale_width * input_width , multiple=SCREAMING_SNAKE_CASE__ ) return (new_height, new_width) class lowerCamelCase__ ( lowerCamelCase_ ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase : bool = False , UpperCamelCase : int = 1 , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' super().__init__(**_UpperCAmelCase ) __UpperCAmelCase : Optional[int] = size if size is not None else {"""height""": 384, """width""": 384} __UpperCAmelCase : Union[str, Any] = get_size_dict(_UpperCAmelCase ) __UpperCAmelCase : List[str] = do_resize __UpperCAmelCase : Dict = size __UpperCAmelCase : Optional[int] = keep_aspect_ratio __UpperCAmelCase : Any = ensure_multiple_of __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Union[str, Any] = do_rescale __UpperCAmelCase : str = rescale_factor __UpperCAmelCase : Optional[int] = do_normalize __UpperCAmelCase : Dict = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __UpperCAmelCase : Dict = image_std if 
image_std is not None else IMAGENET_STANDARD_STD def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : bool = False , UpperCamelCase : int = 1 , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' __UpperCAmelCase : List[str] = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}''' ) __UpperCAmelCase : Dict = get_resize_output_image_size( _UpperCAmelCase , output_size=(size["""height"""], size["""width"""]) , keep_aspect_ratio=_UpperCAmelCase , multiple=_UpperCAmelCase , ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase : Tuple , ): '''simple docstring''' __UpperCAmelCase : Tuple = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Any = size if size is not None else self.size __UpperCAmelCase : int = get_size_dict(_UpperCAmelCase ) __UpperCAmelCase : int = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio __UpperCAmelCase : Tuple = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of __UpperCAmelCase : Optional[int] = resample if resample is not None else self.resample __UpperCAmelCase : int = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Tuple = image_std if image_std is not None else self.image_std __UpperCAmelCase : int = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None or resample is None: raise ValueError("""Size and resample must be specified if do_resize is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __UpperCAmelCase : Optional[Any] = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: __UpperCAmelCase : List[Any] = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_rescale: __UpperCAmelCase : List[Any] = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: __UpperCAmelCase : str = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] __UpperCAmelCase : Optional[Any] = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] __UpperCAmelCase : Union[str, Any] = {"""pixel_values""": images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : List[Tuple] = None ): '''simple docstring''' __UpperCAmelCase : Optional[int] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_UpperCAmelCase ) != len(_UpperCAmelCase ): raise ValueError( """Make sure that you pass in as many target sizes as the batch dimension of the logits""" ) if is_torch_tensor(_UpperCAmelCase ): __UpperCAmelCase : str = target_sizes.numpy() __UpperCAmelCase : Dict = [] for idx in range(len(_UpperCAmelCase ) ): __UpperCAmelCase : Optional[Any] = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=_UpperCAmelCase ) __UpperCAmelCase : str = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_UpperCAmelCase ) else: __UpperCAmelCase : str = logits.argmax(dim=1 ) __UpperCAmelCase : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
367
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
0
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 1_0_0_0 ) -> int: '''simple docstring''' return sum(e for e in range(3 , _UpperCamelCase ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F"{solution() = }")
368
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
0
"""simple docstring""" def lowerCamelCase ( ) -> list[list[int]]: '''simple docstring''' return [list(range(1_0_0_0 - i , -1_0_0_0 - i , -1 ) ) for i in range(1_0_0_0 )] UpperCAmelCase : List[str] = generate_large_matrix() UpperCAmelCase : List[Any] = ( [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]], [[3, 2], [1, 0]], [[7, 7, 6]], [[7, 7, 6], [-1, -2, -3]], grid, ) def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> None: '''simple docstring''' assert all(row == sorted(a__ , reverse=a__ ) for row in grid ) assert all(list(a__ ) == sorted(a__ , reverse=a__ ) for col in zip(*a__ ) ) def lowerCamelCase ( _UpperCamelCase : Tuple ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = len(a__ ) - 1 # Edge cases such as no values or all numbers are negative. if not array or array[0] < 0: return 0 while right + 1 > left: __UpperCAmelCase : Any = (left + right) // 2 __UpperCAmelCase : List[Any] = array[mid] # Num must be negative and the index must be greater than or equal to 0. if num < 0 and array[mid - 1] >= 0: return mid if num >= 0: __UpperCAmelCase : str = mid + 1 else: __UpperCAmelCase : Any = mid - 1 # No negative numbers so return the last index of the array + 1 which is the length. return len(a__ ) def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> int: '''simple docstring''' __UpperCAmelCase : Dict = 0 __UpperCAmelCase : str = len(grid[0] ) for i in range(len(a__ ) ): __UpperCAmelCase : Optional[Any] = find_negative_index(grid[i][:bound] ) total += bound return (len(a__ ) * len(grid[0] )) - total def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> int: '''simple docstring''' return len([number for row in grid for number in row if number < 0] ) def lowerCamelCase ( _UpperCamelCase : Any ) -> int: '''simple docstring''' __UpperCAmelCase : int = 0 for row in grid: for i, number in enumerate(a__ ): if number < 0: total += len(a__ ) - i break return total def lowerCamelCase ( ) -> None: '''simple docstring''' from timeit import timeit print("""Running benchmarks""" ) __UpperCAmelCase : Optional[int] = ( """from __main__ import count_negatives_binary_search, """ """count_negatives_brute_force, count_negatives_brute_force_with_break, grid""" ) for func in ( "count_negatives_binary_search", # took 0.7727 seconds "count_negatives_brute_force_with_break", # took 4.6505 seconds "count_negatives_brute_force", # took 12.8160 seconds ): __UpperCAmelCase : Optional[Any] = timeit(f'''{func}(grid=grid)''' , setup=a__ , number=5_0_0 ) print(f'''{func}() took {time:0.4f} seconds''' ) if __name__ == "__main__": import doctest doctest.testmod() benchmark()
369
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : str = { 'configuration_instructblip': [ 'INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'InstructBlipConfig', 'InstructBlipQFormerConfig', 'InstructBlipVisionConfig', ], 'processing_instructblip': ['InstructBlipProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'InstructBlipQFormerModel', 'InstructBlipPreTrainedModel', 'InstructBlipForConditionalGeneration', 'InstructBlipVisionModel', ] if TYPE_CHECKING: from .configuration_instructblip import ( INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, InstructBlipConfig, InstructBlipQFormerConfig, InstructBlipVisionConfig, ) from .processing_instructblip import InstructBlipProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_instructblip import ( INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST, InstructBlipForConditionalGeneration, InstructBlipPreTrainedModel, InstructBlipQFormerModel, InstructBlipVisionModel, ) else: import sys UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
370
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
0
"""simple docstring""" import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Dict=13 , UpperCamelCase : Optional[int]=[30, 30] , UpperCamelCase : Optional[int]=2 , UpperCamelCase : Dict=3 , UpperCamelCase : int=True , UpperCamelCase : List[str]=True , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : str=5 , UpperCamelCase : List[str]=4 , UpperCamelCase : Any=37 , UpperCamelCase : List[Any]="gelu" , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : int=10 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Dict=3 , UpperCamelCase : List[Any]=None , UpperCamelCase : str=8 , UpperCamelCase : List[str]=10 , ): '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : List[str] = patch_size __UpperCAmelCase : str = num_channels __UpperCAmelCase : Optional[int] = is_training __UpperCAmelCase : str = use_labels __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : Optional[int] = num_hidden_layers __UpperCAmelCase : Any = num_attention_heads __UpperCAmelCase : int = intermediate_size __UpperCAmelCase : str = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : int = attention_probs_dropout_prob __UpperCAmelCase : str = type_sequence_label_size __UpperCAmelCase : Any = initializer_range __UpperCAmelCase : List[str] = num_labels __UpperCAmelCase : List[str] = scope __UpperCAmelCase : int = n_targets __UpperCAmelCase : List[Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens __UpperCAmelCase : Optional[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) __UpperCAmelCase : Union[str, Any] = num_patches + 1 + self.num_detection_tokens def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : int = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) __UpperCAmelCase : Dict = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) __UpperCAmelCase : int = [] for i in range(self.batch_size ): __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : str = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Dict = torch.rand(self.n_targets , 4 , device=_SCREAMING_SNAKE_CASE ) labels.append(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Optional[Any] = self.get_config() return config, pixel_values, labels def lowerCamelCase__ ( self : int ): '''simple docstring''' return YolosConfig( 
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def lowerCamelCase__ ( self : int , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Dict = YolosModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = YolosForObjectDetection(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __UpperCAmelCase : int = model(pixel_values=_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) __UpperCAmelCase : Tuple = model(pixel_values=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : List[str] = self.prepare_config_and_inputs() __UpperCAmelCase : Optional[Any] = config_and_inputs __UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = (YolosModel, YolosForObjectDetection) if is_torch_available() else () __a = ( {"""feature-extraction""": YolosModel, """object-detection""": YolosForObjectDetection} if is_torch_available() else {} ) __a = False __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any]=False ): '''simple docstring''' __UpperCAmelCase : str = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": __UpperCAmelCase : List[Any] = [] for i in range(self.model_tester.batch_size ): __UpperCAmelCase : Union[str, Any] = {} __UpperCAmelCase : str = torch.ones( size=(self.model_tester.n_targets,) , device=_SCREAMING_SNAKE_CASE , dtype=torch.long ) __UpperCAmelCase : List[str] = torch.ones( self.model_tester.n_targets , 4 , device=_SCREAMING_SNAKE_CASE , dtype=torch.float ) labels.append(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Tuple = labels return inputs_dict def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Optional[int] = 
YolosModelTester(self ) __UpperCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : str ): '''simple docstring''' pass def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) __UpperCAmelCase : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __UpperCAmelCase : Any = [*signature.parameters.keys()] __UpperCAmelCase : List[Any] = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __UpperCAmelCase : Any = True # in YOLOS, the seq_len is different __UpperCAmelCase : str = self.model_tester.expected_seq_len for model_class in self.all_model_classes: __UpperCAmelCase : Any = True __UpperCAmelCase : List[Any] = False __UpperCAmelCase : Any = True __UpperCAmelCase : Optional[int] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase : Dict = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase : Dict = outputs.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] __UpperCAmelCase : Any = True __UpperCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase : Any = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase : str = outputs.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) __UpperCAmelCase : int = len(_SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine __UpperCAmelCase : Tuple = True __UpperCAmelCase : int = True __UpperCAmelCase : Union[str, Any] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase : List[Any] = 1 self.assertEqual(out_len + added_hidden_states , len(_SCREAMING_SNAKE_CASE ) ) 
__UpperCAmelCase : str = outputs.attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' def check_hidden_states_output(UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] ): __UpperCAmelCase : Dict = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __UpperCAmelCase : str = outputs.hidden_states __UpperCAmelCase : Union[str, Any] = getattr( self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) # YOLOS has a different seq_length __UpperCAmelCase : Tuple = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __UpperCAmelCase : List[Any] = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __UpperCAmelCase : Union[str, Any] = True check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*_SCREAMING_SNAKE_CASE ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = YolosModel.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @cached_property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None @slow def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Dict = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[Any] = self.default_image_processor __UpperCAmelCase : Tuple = prepare_img() __UpperCAmelCase : str = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).to(_SCREAMING_SNAKE_CASE ) # forward pass with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(inputs.pixel_values ) # verify outputs __UpperCAmelCase : int = torch.Size((1, 100, 92) ) self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[Any] = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] , device=_SCREAMING_SNAKE_CASE , ) __UpperCAmelCase : Optional[int] = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 
0.4017, 0.4462]] , device=_SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) # verify postprocessing __UpperCAmelCase : List[str] = image_processor.post_process_object_detection( _SCREAMING_SNAKE_CASE , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] __UpperCAmelCase : List[Any] = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[Any] = [75, 75, 17, 63, 17] __UpperCAmelCase : int = torch.tensor([335.0_609, 79.3_848, 375.4_216, 187.2_495] ).to(_SCREAMING_SNAKE_CASE ) self.assertEqual(len(results["""scores"""] ) , 5 ) self.assertTrue(torch.allclose(results["""scores"""] , _SCREAMING_SNAKE_CASE , atol=1e-4 ) ) self.assertSequenceEqual(results["""labels"""].tolist() , _SCREAMING_SNAKE_CASE ) self.assertTrue(torch.allclose(results["""boxes"""][0, :] , _SCREAMING_SNAKE_CASE ) )
371
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
0
"""simple docstring""" from diffusers.utils.testing_utils import require_onnxruntime @require_onnxruntime class lowerCamelCase__ : """simple docstring""" pass
350
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
0
"""simple docstring""" import math import sys def lowerCamelCase ( _UpperCamelCase : str ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = "" try: with open(_SCREAMING_SNAKE_CASE , """rb""" ) as binary_file: __UpperCAmelCase : int = binary_file.read() for dat in data: __UpperCAmelCase : List[str] = f'''{dat:08b}''' result += curr_byte return result except OSError: print("""File not accessible""" ) sys.exit() def lowerCamelCase ( _UpperCamelCase : str ) -> str: '''simple docstring''' __UpperCAmelCase : str = {"0": "0", "1": "1"} __UpperCAmelCase : Tuple = "", "" __UpperCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) for i in range(len(_SCREAMING_SNAKE_CASE ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __UpperCAmelCase : Dict = lexicon[curr_string] result += last_match_id __UpperCAmelCase : List[str] = last_match_id + "0" if math.loga(_SCREAMING_SNAKE_CASE ).is_integer(): __UpperCAmelCase : Dict = {} for curr_key in list(_SCREAMING_SNAKE_CASE ): __UpperCAmelCase : List[Any] = lexicon.pop(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : Tuple = new_lex __UpperCAmelCase : Optional[int] = last_match_id + "1" index += 1 __UpperCAmelCase : Optional[int] = "" return result def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> None: '''simple docstring''' __UpperCAmelCase : List[Any] = 8 try: with open(_SCREAMING_SNAKE_CASE , """wb""" ) as opened_file: __UpperCAmelCase : Any = [ to_write[i : i + byte_length] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append("""10000000""" ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array[:-1]: opened_file.write(int(_SCREAMING_SNAKE_CASE , 2 ).to_bytes(1 , byteorder="""big""" ) ) except OSError: print("""File not accessible""" ) sys.exit() def lowerCamelCase ( _UpperCamelCase : str ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[int] = 0 for letter in data_bits: if letter == "1": break counter += 1 __UpperCAmelCase : int = data_bits[counter:] __UpperCAmelCase : List[Any] = data_bits[counter + 1 :] return data_bits def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> None: '''simple docstring''' __UpperCAmelCase : Optional[int] = read_file_binary(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[Any] = remove_prefix(_SCREAMING_SNAKE_CASE ) __UpperCAmelCase : List[Any] = decompress_data(_SCREAMING_SNAKE_CASE ) write_file_binary(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
351
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
0
"""simple docstring""" import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class lowerCamelCase__ ( A ): """simple docstring""" __a = (PNDMScheduler,) __a = (("""num_inference_steps""", 50),) def lowerCamelCase__ ( self : List[str] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : str = { '''num_train_timesteps''': 1_000, '''beta_start''': 0.0001, '''beta_end''': 0.02, '''beta_schedule''': '''linear''', } config.update(**UpperCamelCase ) return config def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int=0 , **UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = dict(self.forward_default_kwargs ) __UpperCAmelCase : Any = kwargs.pop("""num_inference_steps""" , UpperCamelCase ) __UpperCAmelCase : List[Any] = self.dummy_sample __UpperCAmelCase : Union[str, Any] = 0.1 * sample __UpperCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __UpperCAmelCase : List[str] = self.get_scheduler_config(**UpperCamelCase ) __UpperCAmelCase : List[Any] = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) # copy over dummy past residuals __UpperCAmelCase : str = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase ) __UpperCAmelCase : List[Any] = scheduler_class.from_pretrained(UpperCamelCase ) new_scheduler.set_timesteps(UpperCamelCase ) # copy over dummy past residuals __UpperCAmelCase : Optional[int] = dummy_past_residuals[:] __UpperCAmelCase : List[Any] = scheduler.step_prk(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample __UpperCAmelCase : Dict = new_scheduler.step_prk(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" __UpperCAmelCase : Optional[Any] = scheduler.step_plms(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample __UpperCAmelCase : str = new_scheduler.step_plms(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[int]=0 , **UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Any = dict(self.forward_default_kwargs ) __UpperCAmelCase : List[Any] = kwargs.pop("""num_inference_steps""" , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = self.dummy_sample __UpperCAmelCase : Union[str, Any] = 0.1 * sample __UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: __UpperCAmelCase : int = self.get_scheduler_config() __UpperCAmelCase : int = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) # copy over dummy past residuals (must be after setting timesteps) __UpperCAmelCase : Optional[Any] = dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(UpperCamelCase ) __UpperCAmelCase : str = scheduler_class.from_pretrained(UpperCamelCase ) # copy over dummy past residuals new_scheduler.set_timesteps(UpperCamelCase ) # copy over dummy past residual (must be after setting timesteps) __UpperCAmelCase : 
Any = dummy_past_residuals[:] __UpperCAmelCase : Optional[Any] = scheduler.step_prk(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample __UpperCAmelCase : Optional[int] = new_scheduler.step_prk(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" __UpperCAmelCase : Dict = scheduler.step_plms(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample __UpperCAmelCase : str = new_scheduler.step_plms(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical" def lowerCamelCase__ ( self : Optional[Any] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.scheduler_classes[0] __UpperCAmelCase : Any = self.get_scheduler_config(**UpperCamelCase ) __UpperCAmelCase : Tuple = scheduler_class(**UpperCamelCase ) __UpperCAmelCase : Optional[Any] = 10 __UpperCAmelCase : Dict = self.dummy_model() __UpperCAmelCase : str = self.dummy_sample_deter scheduler.set_timesteps(UpperCamelCase ) for i, t in enumerate(scheduler.prk_timesteps ): __UpperCAmelCase : Dict = model(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[Any] = scheduler.step_prk(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): __UpperCAmelCase : Dict = model(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = scheduler.step_plms(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample return sample def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : int = dict(self.forward_default_kwargs ) __UpperCAmelCase : Tuple = kwargs.pop("""num_inference_steps""" , UpperCamelCase ) for scheduler_class in self.scheduler_classes: __UpperCAmelCase : List[str] = self.get_scheduler_config() __UpperCAmelCase : Union[str, Any] = scheduler_class(**UpperCamelCase ) __UpperCAmelCase : int = self.dummy_sample __UpperCAmelCase : List[str] = 0.1 * sample if num_inference_steps is not None and hasattr(UpperCamelCase , """set_timesteps""" ): scheduler.set_timesteps(UpperCamelCase ) elif num_inference_steps is not None and not hasattr(UpperCamelCase , """set_timesteps""" ): __UpperCAmelCase : Tuple = num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) __UpperCAmelCase : List[str] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] __UpperCAmelCase : Dict = dummy_past_residuals[:] __UpperCAmelCase : str = scheduler.step_prk(UpperCamelCase , 0 , UpperCamelCase , **UpperCamelCase ).prev_sample __UpperCAmelCase : Tuple = scheduler.step_prk(UpperCamelCase , 1 , UpperCamelCase , **UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) __UpperCAmelCase : List[str] = scheduler.step_plms(UpperCamelCase , 0 , UpperCamelCase , **UpperCamelCase ).prev_sample __UpperCAmelCase : str = scheduler.step_plms(UpperCamelCase , 1 , UpperCamelCase , **UpperCamelCase ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' for timesteps in [100, 1_000]: self.check_over_configs(num_train_timesteps=UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' for 
steps_offset in [0, 1]: self.check_over_configs(steps_offset=UpperCamelCase ) __UpperCAmelCase : Optional[int] = self.scheduler_classes[0] __UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(steps_offset=1 ) __UpperCAmelCase : Optional[Any] = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(10 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' for beta_start, beta_end in zip([0.0001, 0.001] , [0.002, 0.02] ): self.check_over_configs(beta_start=UpperCamelCase , beta_end=UpperCamelCase ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' for t in [1, 5, 10]: self.check_over_forward(time_step=UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ): self.check_over_forward(num_inference_steps=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = 27 for scheduler_class in self.scheduler_classes: __UpperCAmelCase : Optional[Any] = self.dummy_sample __UpperCAmelCase : List[Any] = 0.1 * sample __UpperCAmelCase : Optional[Any] = self.get_scheduler_config() __UpperCAmelCase : Optional[Any] = scheduler_class(**UpperCamelCase ) scheduler.set_timesteps(UpperCamelCase ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): __UpperCAmelCase : Optional[int] = scheduler.step_prk(UpperCamelCase , UpperCamelCase , UpperCamelCase ).prev_sample def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' with self.assertRaises(UpperCamelCase ): __UpperCAmelCase : str = self.scheduler_classes[0] __UpperCAmelCase : Optional[int] = self.get_scheduler_config() __UpperCAmelCase : str = scheduler_class(**UpperCamelCase ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Tuple = self.full_loop() __UpperCAmelCase : int = torch.sum(torch.abs(UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 198.1318 ) < 1e-2 assert abs(result_mean.item() - 0.2580 ) < 1e-3 def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = self.full_loop(prediction_type="""v_prediction""" ) __UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase ) ) __UpperCAmelCase : List[str] = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 67.3986 ) < 1e-2 assert abs(result_mean.item() - 0.0878 ) < 1e-3 def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.full_loop(set_alpha_to_one=UpperCamelCase , beta_start=0.01 ) __UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(UpperCamelCase ) ) __UpperCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 230.0399 ) < 1e-2 assert abs(result_mean.item() - 0.2995 ) < 1e-3 def lowerCamelCase__ ( self : str ): '''simple 
docstring''' __UpperCAmelCase : Tuple = self.full_loop(set_alpha_to_one=UpperCamelCase , beta_start=0.01 ) __UpperCAmelCase : Optional[int] = torch.sum(torch.abs(UpperCamelCase ) ) __UpperCAmelCase : Dict = torch.mean(torch.abs(UpperCamelCase ) ) assert abs(result_sum.item() - 186.9482 ) < 1e-2 assert abs(result_mean.item() - 0.2434 ) < 1e-3
352
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
0
"""simple docstring""" UpperCAmelCase : Union[str, Any] = range(2, 20 + 1) UpperCAmelCase : Dict = [10**k for k in range(ks[-1] + 1)] UpperCAmelCase : dict[int, dict[int, list[list[int]]]] = {} def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Dict = sum(a_i[j] for j in range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ) __UpperCAmelCase : Optional[int] = sum(a_i[j] * base[j] for j in range(min(len(UpperCAmelCase_ ) , UpperCAmelCase_ ) ) ) __UpperCAmelCase ,__UpperCAmelCase : Tuple = 0, 0 __UpperCAmelCase : List[str] = n - i __UpperCAmelCase : Dict = memo.get(UpperCAmelCase_ ) if sub_memo is not None: __UpperCAmelCase : List[str] = sub_memo.get(UpperCAmelCase_ ) if jumps is not None and len(UpperCAmelCase_ ) > 0: # find and make the largest jump without going over __UpperCAmelCase : Union[str, Any] = -1 for _k in range(len(UpperCAmelCase_ ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: __UpperCAmelCase : Optional[Any] = _k break if max_jump >= 0: __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[str] = jumps[max_jump] # since the difference between jumps is cached, add c __UpperCAmelCase : Dict = diff + c for j in range(min(UpperCAmelCase_ , len(UpperCAmelCase_ ) ) ): __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = divmod(UpperCAmelCase_ , 1_0 ) if new_c > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) else: __UpperCAmelCase : int = [] else: __UpperCAmelCase : Dict = {c: []} __UpperCAmelCase : Union[str, Any] = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = next_term(UpperCAmelCase_ , k - 1 , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead __UpperCAmelCase ,__UpperCAmelCase : str = compute(UpperCAmelCase_ , UpperCAmelCase_ , i + dn , UpperCAmelCase_ ) diff += _diff dn += terms_jumped __UpperCAmelCase : str = sub_memo[c] # keep jumps sorted by # of terms skipped __UpperCAmelCase : Any = 0 while j < len(UpperCAmelCase_ ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(UpperCAmelCase_ , (diff, dn, k) ) return (diff, dn) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : str ) -> List[Any]: '''simple docstring''' if i >= n: return 0, i if k > len(UpperCAmelCase_ ): a_i.extend([0 for _ in range(k - len(UpperCAmelCase_ ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) __UpperCAmelCase : List[str] = i __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Tuple = 0, 0, 0 for j in range(len(UpperCAmelCase_ ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 __UpperCAmelCase : int = ds_c + ds_b diff += addend __UpperCAmelCase : Optional[Any] = 0 for j in range(UpperCAmelCase_ ): __UpperCAmelCase : Dict = a_i[j] + addend __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = divmod(UpperCAmelCase_ , 1_0 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) return diff, i - start_i def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) -> List[Any]: '''simple docstring''' for j in 
range(UpperCAmelCase_ , len(UpperCAmelCase_ ) ): __UpperCAmelCase : Optional[int] = digits[j] + addend if s >= 1_0: __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(UpperCAmelCase_ , 1_0 ) __UpperCAmelCase : List[Any] = addend // 1_0 + quotient else: __UpperCAmelCase : str = s __UpperCAmelCase : Optional[int] = addend // 1_0 if addend == 0: break while addend > 0: __UpperCAmelCase ,__UpperCAmelCase : int = divmod(UpperCAmelCase_ , 1_0 ) digits.append(UpperCAmelCase_ ) def lowerCamelCase ( _UpperCamelCase : int = 1_0**1_5 ) -> Dict: '''simple docstring''' __UpperCAmelCase : str = [1] __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : str = 0 while True: __UpperCAmelCase ,__UpperCAmelCase : Dict = next_term(UpperCAmelCase_ , 2_0 , i + dn , UpperCAmelCase_ ) dn += terms_jumped if dn == n - i: break __UpperCAmelCase : str = 0 for j in range(len(UpperCAmelCase_ ) ): a_n += digits[j] * 1_0**j return a_n if __name__ == "__main__": print(F"{solution() = }")
353
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" import sys from collections import defaultdict class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [] def lowerCamelCase__ ( self : int , UpperCamelCase : List[Any] ): '''simple docstring''' return self.node_position[vertex] def lowerCamelCase__ ( self : Dict , UpperCamelCase : str , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = pos def lowerCamelCase__ ( self : Any , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : int ): '''simple docstring''' if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __UpperCAmelCase : List[Any] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __UpperCAmelCase : Dict = 2 * start + 1 else: __UpperCAmelCase : Optional[Any] = 2 * start + 2 if heap[smallest_child] < heap[start]: __UpperCAmelCase : Tuple = heap[smallest_child], positions[smallest_child] __UpperCAmelCase : List[Any] = ( heap[start], positions[start], ) __UpperCAmelCase : Optional[Any] = temp, tempa __UpperCAmelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , UpperCamelCase ) self.top_to_bottom(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : Tuple = position[index] while index != 0: __UpperCAmelCase : List[str] = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __UpperCAmelCase : Union[str, Any] = heap[parent] __UpperCAmelCase : Union[str, Any] = position[parent] self.set_position(position[parent] , UpperCamelCase ) else: __UpperCAmelCase : List[Any] = val __UpperCAmelCase : Tuple = temp self.set_position(UpperCamelCase , UpperCamelCase ) break __UpperCAmelCase : Tuple = parent else: __UpperCAmelCase : int = val __UpperCAmelCase : int = temp self.set_position(UpperCamelCase , 0 ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = len(UpperCamelCase ) // 2 - 1 for i in range(UpperCamelCase , -1 , -1 ): self.top_to_bottom(UpperCamelCase , UpperCamelCase , len(UpperCamelCase ) , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = positions[0] __UpperCAmelCase : List[str] = sys.maxsize self.top_to_bottom(UpperCamelCase , 0 , len(UpperCamelCase ) , UpperCamelCase ) return temp def lowerCamelCase ( _UpperCamelCase : str ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = Heap() __UpperCAmelCase : Optional[int] = [0] * len(_lowerCAmelCase ) __UpperCAmelCase : Tuple = [-1] * len(_lowerCAmelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __UpperCAmelCase : Optional[int] = [] # Heap of Distance of vertices from their neighboring vertex __UpperCAmelCase : List[Any] = [] for vertex in range(len(_lowerCAmelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_lowerCAmelCase ) heap.node_position.append(_lowerCAmelCase ) __UpperCAmelCase : 
Optional[int] = [] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : Tuple = sys.maxsize for neighbor, distance in adjacency_list[0]: __UpperCAmelCase : str = 0 __UpperCAmelCase : Any = distance heap.heapify(_lowerCAmelCase , _lowerCAmelCase ) for _ in range(1 , len(_lowerCAmelCase ) ): __UpperCAmelCase : Any = heap.delete_minimum(_lowerCAmelCase , _lowerCAmelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __UpperCAmelCase : List[str] = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_lowerCAmelCase )] ): __UpperCAmelCase : Optional[Any] = distance heap.bottom_to_top( _lowerCAmelCase , heap.get_position(_lowerCAmelCase ) , _lowerCAmelCase , _lowerCAmelCase ) __UpperCAmelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > UpperCAmelCase : Dict = int(input('Enter number of edges: ').strip()) UpperCAmelCase : Tuple = defaultdict(list) for _ in range(edges_number): UpperCAmelCase : str = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
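# Minimal sketch: MST of a hard-coded 3-vertex triangle, so the demo needs
# no stdin. Each inner list is [neighbor, weight]; vertices are 0..2.
demo_graph = {
    0: [[1, 1], [2, 3]],
    1: [[0, 1], [2, 2]],
    2: [[0, 3], [1, 2]],
}
print(prisms_algorithm(demo_graph))  # expected: [(0, 1), (1, 2)]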
354
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" import copy from dataclasses import dataclass, field from typing import ClassVar, Dict from ..features import Audio, Features, Value from .base import TaskTemplate @dataclass(frozen=A ) class lowerCamelCase__ ( A ): """simple docstring""" __a = field(default="""automatic-speech-recognition""" , metadata={"""include_in_asdict_even_if_is_default""": True} ) __a = Features({"""audio""": Audio()} ) __a = Features({"""transcription""": Value("""string""" )} ) __a = """audio""" __a = """transcription""" def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int ): '''simple docstring''' if self.audio_column not in features: raise ValueError(f'''Column {self.audio_column} is not present in features.''' ) if not isinstance(features[self.audio_column] , lowerCamelCase_ ): raise ValueError(f'''Column {self.audio_column} is not an Audio type.''' ) __UpperCAmelCase : List[Any] = copy.deepcopy(self ) __UpperCAmelCase : List[Any] = self.input_schema.copy() __UpperCAmelCase : Tuple = features[self.audio_column] __UpperCAmelCase : Any = input_schema return task_template @property def lowerCamelCase__ ( self : str ): '''simple docstring''' return {self.audio_column: "audio", self.transcription_column: "transcription"}
355
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
0
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : str = { 'snap-research/efficientformer-l1-300': ( 'https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json' ), } class lowerCamelCase__ ( _lowerCamelCase ): """simple docstring""" __a = """efficientformer""" def __init__( self : Any , UpperCamelCase : List[int] = [3, 2, 6, 4] , UpperCamelCase : List[int] = [48, 96, 224, 448] , UpperCamelCase : List[bool] = [True, True, True, True] , UpperCamelCase : int = 448 , UpperCamelCase : int = 32 , UpperCamelCase : int = 4 , UpperCamelCase : int = 7 , UpperCamelCase : int = 5 , UpperCamelCase : int = 8 , UpperCamelCase : int = 4 , UpperCamelCase : float = 0.0 , UpperCamelCase : int = 16 , UpperCamelCase : int = 3 , UpperCamelCase : int = 3 , UpperCamelCase : int = 3 , UpperCamelCase : int = 2 , UpperCamelCase : int = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : int = 1 , UpperCamelCase : bool = True , UpperCamelCase : bool = True , UpperCamelCase : float = 1e-5 , UpperCamelCase : str = "gelu" , UpperCamelCase : float = 0.02 , UpperCamelCase : float = 1e-1_2 , UpperCamelCase : int = 224 , UpperCamelCase : float = 1e-0_5 , **UpperCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(**lowercase_ ) __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout_prob __UpperCAmelCase : Union[str, Any] = hidden_sizes __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : List[str] = num_attention_heads __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : Dict = layer_norm_eps __UpperCAmelCase : List[Any] = patch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : Tuple = depths __UpperCAmelCase : Dict = mlp_expansion_ratio __UpperCAmelCase : List[Any] = downsamples __UpperCAmelCase : Tuple = dim __UpperCAmelCase : List[str] = key_dim __UpperCAmelCase : List[str] = attention_ratio __UpperCAmelCase : Union[str, Any] = resolution __UpperCAmelCase : Optional[Any] = pool_size __UpperCAmelCase : str = downsample_patch_size __UpperCAmelCase : Optional[Any] = downsample_stride __UpperCAmelCase : str = downsample_pad __UpperCAmelCase : Any = drop_path_rate __UpperCAmelCase : Optional[Any] = num_metaad_blocks __UpperCAmelCase : int = distillation __UpperCAmelCase : Any = use_layer_scale __UpperCAmelCase : Optional[Any] = layer_scale_init_value __UpperCAmelCase : str = image_size __UpperCAmelCase : Optional[int] = batch_norm_eps
356
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Tuple = '''▁''' UpperCAmelCase : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model'''} UpperCAmelCase : List[Any] = { '''vocab_file''': { '''facebook/mbart-large-50-one-to-many-mmt''': ( '''https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model''' ), } } UpperCAmelCase : str = { '''facebook/mbart-large-50-one-to-many-mmt''': 1024, } # fmt: off UpperCAmelCase : Dict = ['''ar_AR''', '''cs_CZ''', '''de_DE''', '''en_XX''', '''es_XX''', '''et_EE''', '''fi_FI''', '''fr_XX''', '''gu_IN''', '''hi_IN''', '''it_IT''', '''ja_XX''', '''kk_KZ''', '''ko_KR''', '''lt_LT''', '''lv_LV''', '''my_MM''', '''ne_NP''', '''nl_XX''', '''ro_RO''', '''ru_RU''', '''si_LK''', '''tr_TR''', '''vi_VN''', '''zh_CN''', '''af_ZA''', '''az_AZ''', '''bn_IN''', '''fa_IR''', '''he_IL''', '''hr_HR''', '''id_ID''', '''ka_GE''', '''km_KH''', '''mk_MK''', '''ml_IN''', '''mn_MN''', '''mr_IN''', '''pl_PL''', '''ps_AF''', '''pt_XX''', '''sv_SE''', '''sw_KE''', '''ta_IN''', '''te_IN''', '''th_TH''', '''tl_XX''', '''uk_UA''', '''ur_PK''', '''xh_ZA''', '''gl_ES''', '''sl_SI'''] class lowerCamelCase__ ( A__ ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = PRETRAINED_VOCAB_FILES_MAP __a = ['input_ids', 'attention_mask'] __a = [] __a = [] def __init__( self : str , UpperCamelCase : List[str] , UpperCamelCase : int=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : Tuple="</s>" , UpperCamelCase : Tuple="</s>" , UpperCamelCase : List[str]="<s>" , UpperCamelCase : int="<unk>" , UpperCamelCase : str="<pad>" , UpperCamelCase : List[Any]="<mask>" , UpperCamelCase : int = None , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' __UpperCAmelCase : List[str] = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token __UpperCAmelCase : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs __UpperCAmelCase : str = kwargs.get("""additional_special_tokens""" , [] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=lowerCamelCase__ , tgt_lang=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **lowerCamelCase__ , ) __UpperCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(lowerCamelCase__ ) ) __UpperCAmelCase : int = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __UpperCAmelCase : List[Any] = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __UpperCAmelCase : List[str] = 1 __UpperCAmelCase : List[Any] = len(self.sp_model ) __UpperCAmelCase : Any = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(lowerCamelCase__ ) } __UpperCAmelCase : List[str] = {v: k for k, v in self.lang_code_to_id.items()} __UpperCAmelCase : Dict = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) __UpperCAmelCase : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} __UpperCAmelCase : List[str] = src_lang if src_lang is not None else """en_XX""" __UpperCAmelCase : Dict = self.lang_code_to_id[self._src_lang] __UpperCAmelCase : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def lowerCamelCase__ ( self : Any ): '''simple docstring''' return self._src_lang @src_lang.setter def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Dict = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self : Dict ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.__dict__.copy() __UpperCAmelCase : Optional[int] = None return state def __setstate__( self : Optional[Any] , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = {self.convert_ids_to_tokens(lowerCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return self.sp_model.encode(lowerCamelCase__ , out_type=lowerCamelCase__ ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : int ): '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __UpperCAmelCase : Optional[Any] = self.sp_model.PieceToId(lowerCamelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any ): '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = [] __UpperCAmelCase : int = """""" __UpperCAmelCase : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(lowerCamelCase__ ) + token __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : Any = 
[] else: current_sub_tokens.append(lowerCamelCase__ ) __UpperCAmelCase : Union[str, Any] = False out_string += self.sp_model.decode(lowerCamelCase__ ) return out_string.strip() def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] = None ): '''simple docstring''' if not os.path.isdir(lowerCamelCase__ ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Union[str, Any] = os.path.join( lowerCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(lowerCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , lowerCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(lowerCamelCase__ , """wb""" ) as fi: __UpperCAmelCase : int = self.sp_model.serialized_model_proto() fi.write(lowerCamelCase__ ) return (out_vocab_file,) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] = None , UpperCamelCase : List[str] = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ ) __UpperCAmelCase : List[str] = [1] * len(self.prefix_tokens ) __UpperCAmelCase : Optional[int] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(lowerCamelCase__ )) + suffix_ones return prefix_ones + ([0] * len(lowerCamelCase__ )) + ([0] * len(lowerCamelCase__ )) + suffix_ones def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : int = None ): '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Tuple , **UpperCamelCase : Optional[int] ): '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" ) __UpperCAmelCase : Optional[int] = src_lang __UpperCAmelCase : Dict = self(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors=lowerCamelCase__ , **lowerCamelCase__ ) __UpperCAmelCase : Tuple = self.convert_tokens_to_ids(lowerCamelCase__ ) __UpperCAmelCase : Dict = tgt_lang_id return inputs def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : int = "en_XX" , UpperCamelCase : Any = None , UpperCamelCase : List[str] = "ro_RO" , **UpperCamelCase : List[str] , ): '''simple docstring''' __UpperCAmelCase : Any = src_lang __UpperCAmelCase : Optional[int] = tgt_lang return super().prepare_seqaseq_batch(lowerCamelCase__ , lowerCamelCase__ , **lowerCamelCase__ ) def lowerCamelCase__ ( self : int ): '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def lowerCamelCase__ ( self : str ): '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Dict = self.lang_code_to_id[src_lang] __UpperCAmelCase : Dict = [self.cur_lang_code_id] __UpperCAmelCase : Tuple = [self.eos_token_id] 
def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Dict = self.lang_code_to_id[tgt_lang] __UpperCAmelCase : Tuple = [self.cur_lang_code_id] __UpperCAmelCase : Union[str, Any] = [self.eos_token_id]
357
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
0
"""simple docstring""" from collections.abc import Sequence from queue import Queue class lowerCamelCase__ : """simple docstring""" def __init__( self : str , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Any=None , UpperCamelCase : Any=None ): '''simple docstring''' __UpperCAmelCase : int = start __UpperCAmelCase : int = end __UpperCAmelCase : str = val __UpperCAmelCase : Dict = (start + end) // 2 __UpperCAmelCase : List[str] = left __UpperCAmelCase : List[Any] = right def __repr__( self : Tuple ): '''simple docstring''' return f'''SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})''' class lowerCamelCase__ : """simple docstring""" def __init__( self : int , UpperCamelCase : Sequence , UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : Dict = collection __UpperCAmelCase : int = function if self.collection: __UpperCAmelCase : List[str] = self._build_tree(0 , len(lowercase_ ) - 1 ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' self._update_tree(self.root , lowercase_ , lowercase_ ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple ): '''simple docstring''' return self._query_range(self.root , lowercase_ , lowercase_ ) def lowerCamelCase__ ( self : Any , UpperCamelCase : int , UpperCamelCase : Dict ): '''simple docstring''' if start == end: return SegmentTreeNode(lowercase_ , lowercase_ , self.collection[start] ) __UpperCAmelCase : Dict = (start + end) // 2 __UpperCAmelCase : str = self._build_tree(lowercase_ , lowercase_ ) __UpperCAmelCase : Dict = self._build_tree(mid + 1 , lowercase_ ) return SegmentTreeNode(lowercase_ , lowercase_ , self.fn(left.val , right.val ) , lowercase_ , lowercase_ ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple ): '''simple docstring''' if node.start == i and node.end == i: __UpperCAmelCase : List[Any] = val return if i <= node.mid: self._update_tree(node.left , lowercase_ , lowercase_ ) else: self._update_tree(node.right , lowercase_ , lowercase_ ) __UpperCAmelCase : int = self.fn(node.left.val , node.right.val ) def lowerCamelCase__ ( self : int , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , lowercase_ , lowercase_ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , lowercase_ , node.mid ) , self._query_range(node.right , node.mid + 1 , lowercase_ ) , ) else: # range in right child tree return self._query_range(node.right , lowercase_ , lowercase_ ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' if self.root is not None: __UpperCAmelCase : int = Queue() queue.put(self.root ) while not queue.empty(): __UpperCAmelCase : int = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print('*' * 50) UpperCAmelCase : int = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 
print(arr.query_range(1, 3)) # 13 print()
358
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
0
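The record above fingerprints each predicted mask with a short hash rather than comparing raw arrays against expected values. Below is a minimal sketch of that fingerprinting trick, using numpy arrays instead of PIL images; the helper names are hypothetical, not taken from the record:

import hashlib

import numpy as np


def hash_array(arr: np.ndarray) -> str:
    # Hash the raw buffer so a test can compare a short, stable string
    # instead of a full 480x640 mask array.
    return hashlib.md5(arr.tobytes()).hexdigest()[:10]


def summarize_mask(mask: np.ndarray) -> dict:
    # Reduce a mask to a comparable summary: short hash plus array shape.
    return {"hash": hash_array(mask), "shape": mask.shape}


mask = np.zeros((480, 640), dtype=bool)
mask[100:200, 150:300] = True
print(summarize_mask(mask))  # {'hash': '...', 'shape': (480, 640)}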
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : str ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __UpperCAmelCase : Tuple = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : Any = TFAutoModel.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : List[Any] = AutoModel.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : List[str] = TFAutoModelForPreTraining.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : int = AutoModelForPreTraining.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def lowerCamelCase__ ( self : str ): '''simple docstring''' for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[Any] = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : Optional[Any] = TFAutoModelForCausalLM.from_pretrained(__A , from_pt=__A ) __UpperCAmelCase ,__UpperCAmelCase : Dict = TFAutoModelForCausalLM.from_pretrained( __A , output_loading_info=__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(__A , from_tf=__A ) __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = AutoModelForCausalLM.from_pretrained( __A , output_loading_info=__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : int = 
AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : Dict = TFAutoModelWithLMHead.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : Optional[Any] = AutoModelWithLMHead.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : str = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : Tuple = TFAutoModelForMaskedLM.from_pretrained(__A , from_pt=__A ) __UpperCAmelCase ,__UpperCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained( __A , output_loading_info=__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : Tuple = AutoModelForMaskedLM.from_pretrained(__A , from_tf=__A ) __UpperCAmelCase ,__UpperCAmelCase : str = AutoModelForMaskedLM.from_pretrained( __A , output_loading_info=__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Dict = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : str = TFAutoModelForSeqaSeqLM.from_pretrained(__A , from_pt=__A ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = TFAutoModelForSeqaSeqLM.from_pretrained( __A , output_loading_info=__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(__A , from_tf=__A ) __UpperCAmelCase ,__UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained( __A , output_loading_info=__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __UpperCAmelCase : int = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : int = TFAutoModelForSequenceClassification.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : int = AutoModelForSequenceClassification.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_name in ["bert-base-uncased"]: __UpperCAmelCase : List[str] = AutoConfig.from_pretrained(__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : List[str] = TFAutoModelForQuestionAnswering.from_pretrained(__A , from_pt=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) __UpperCAmelCase : Optional[Any] = AutoModelForQuestionAnswering.from_pretrained(__A , from_tf=__A ) self.assertIsNotNone(__A ) self.assertIsInstance(__A , __A ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Dict = TFAutoModelWithLMHead.from_pretrained(__A , from_pt=__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 14_410 ) __UpperCAmelCase : str = AutoModelWithLMHead.from_pretrained(__A , from_tf=__A ) self.assertIsInstance(__A , __A ) 
self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 14_410 ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFAutoModelWithLMHead.from_pretrained(__A , from_pt=__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 14_410 ) __UpperCAmelCase : List[Any] = AutoModelWithLMHead.from_pretrained(__A , from_tf=__A ) self.assertIsInstance(__A , __A ) self.assertEqual(model.num_parameters() , 14_410 ) self.assertEqual(model.num_parameters(only_trainable=__A ) , 14_410 )
359
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
320
0
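The tests above round-trip checkpoints between frameworks with the from_pt/from_tf flags of from_pretrained. A minimal usage sketch, assuming both torch and tensorflow are installed and the checkpoint ships weights for both frameworks:

# Cross-framework loading: `from_pt=True` builds a TF model from PyTorch
# weights, and `from_tf=True` does the reverse.
from transformers import AutoModel, TFAutoModel

name = "bert-base-uncased"  # illustrative checkpoint with both weight sets

tf_model = TFAutoModel.from_pretrained(name, from_pt=True)
pt_model = AutoModel.from_pretrained(name, from_tf=True)

print(type(tf_model).__name__, type(pt_model).__name__)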
"""simple docstring""" import random from typing import Any def lowerCamelCase ( _UpperCamelCase : list ) -> Dict: '''simple docstring''' for _ in range(len(_UpperCamelCase ) ): __UpperCAmelCase : List[str] = random.randint(0 , len(_UpperCamelCase ) - 1 ) __UpperCAmelCase : Dict = random.randint(0 , len(_UpperCamelCase ) - 1 ) __UpperCAmelCase : Any = data[b], data[a] return data if __name__ == "__main__": UpperCAmelCase : str = [0, 1, 2, 3, 4, 5, 6, 7] UpperCAmelCase : Optional[int] = ["python", "says", "hello", "!"] print('Fisher-Yates Shuffle:') print('List', integers, strings) print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
360
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
0
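Note that the shuffle in the record above swaps two random positions per pass rather than performing the classic Fisher-Yates sweep. For comparison, here is a minimal sketch of the standard in-place algorithm, which is uniform over permutations:

import random


def fisher_yates_shuffle(data: list) -> list:
    # Classic Fisher-Yates: walk from the end, swapping each position
    # with a uniformly chosen index at or below it.
    for i in range(len(data) - 1, 0, -1):
        j = random.randint(0, i)  # inclusive on both ends
        data[i], data[j] = data[j], data[i]
    return data


print(fisher_yates_shuffle([0, 1, 2, 3, 4, 5, 6, 7]))
print(fisher_yates_shuffle(["python", "says", "hello", "!"]))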
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int ) -> Dict: '''simple docstring''' __UpperCAmelCase : int = abs(_a ) __UpperCAmelCase : Dict = 0 while n > 0: res += n % 1_0 n //= 1_0 return res def lowerCamelCase ( _UpperCamelCase : int ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = abs(_a ) return n if n < 1_0 else n % 1_0 + sum_of_digits(n // 1_0 ) def lowerCamelCase ( _UpperCamelCase : int ) -> List[str]: '''simple docstring''' return sum(int(_a ) for c in str(abs(_a ) ) ) def lowerCamelCase ( ) -> Dict: '''simple docstring''' from collections.abc import Callable from timeit import timeit def benchmark_a_function(_UpperCamelCase : Callable , _UpperCamelCase : int ) -> None: __UpperCAmelCase : List[Any] = f'''{func.__name__}({value})''' __UpperCAmelCase : Tuple = timeit(f'''__main__.{call}''' , setup="""import __main__""" ) print(f'''{call:56} = {func(_a )} -- {timing:.4f} seconds''' ) for value in (2_6_2_1_4_4, 1_1_2_5_8_9_9_9_0_6_8_4_2_6_2_4, 1_2_6_7_6_5_0_6_0_0_2_2_8_2_2_9_4_0_1_4_9_6_7_0_3_2_0_5_3_7_6): for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact): benchmark_a_function(_a , _a ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
361
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
0
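The digit-sum record above implements the same function three ways (iterative, recursive, and string-based) and benchmarks them with timeit. A short sketch, with hypothetical function names, checking that the three variants agree:

def digit_sum_loop(n: int) -> int:
    # Peel off the last digit with % 10 and drop it with // 10.
    n = abs(n)
    total = 0
    while n > 0:
        total += n % 10
        n //= 10
    return total


def digit_sum_recursive(n: int) -> int:
    n = abs(n)
    return n if n < 10 else n % 10 + digit_sum_recursive(n // 10)


def digit_sum_str(n: int) -> int:
    return sum(int(c) for c in str(abs(n)))


for value in (262144, 1125899906842624):
    assert digit_sum_loop(value) == digit_sum_recursive(value) == digit_sum_str(value)
    print(value, "->", digit_sum_loop(value))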
"""simple docstring""" from . import ( albert, align, altclip, audio_spectrogram_transformer, auto, autoformer, bark, bart, barthez, bartpho, beit, bert, bert_generation, bert_japanese, bertweet, big_bird, bigbird_pegasus, biogpt, bit, blenderbot, blenderbot_small, blip, blip_a, bloom, bridgetower, byta, camembert, canine, chinese_clip, clap, clip, clipseg, codegen, conditional_detr, convbert, convnext, convnextva, cpm, cpmant, ctrl, cvt, dataavec, deberta, deberta_va, decision_transformer, deformable_detr, deit, deprecated, deta, detr, dialogpt, dinat, distilbert, dit, donut, dpr, dpt, efficientformer, efficientnet, electra, encodec, encoder_decoder, ernie, ernie_m, esm, falcon, flaubert, flava, fnet, focalnet, fsmt, funnel, git, glpn, gpta, gpt_bigcode, gpt_neo, gpt_neox, gpt_neox_japanese, gpt_swa, gptj, gptsan_japanese, graphormer, groupvit, herbert, hubert, ibert, imagegpt, informer, instructblip, jukebox, layoutlm, layoutlmva, layoutlmva, layoutxlm, led, levit, lilt, llama, longformer, longta, luke, lxmert, mam_aaa, marian, markuplm, maskaformer, maskformer, mbart, mbartaa, mega, megatron_bert, megatron_gpta, mgp_str, mluke, mobilebert, mobilenet_va, mobilenet_va, mobilevit, mobilevitva, mpnet, mra, mta, musicgen, mvp, nat, nezha, nllb, nllb_moe, nystromformer, oneformer, open_llama, openai, opt, owlvit, pegasus, pegasus_x, perceiver, phobert, pixastruct, plbart, poolformer, prophetnet, qdqbert, rag, realm, reformer, regnet, rembert, resnet, roberta, roberta_prelayernorm, roc_bert, roformer, rwkv, sam, segformer, sew, sew_d, speech_encoder_decoder, speech_to_text, speech_to_text_a, speechta, splinter, squeezebert, swiftformer, swin, swinasr, swinva, switch_transformers, ta, table_transformer, tapas, time_series_transformer, timesformer, timm_backbone, transfo_xl, trocr, tvlt, umta, unispeech, unispeech_sat, upernet, videomae, vilt, vision_encoder_decoder, vision_text_dual_encoder, visual_bert, vit, vit_hybrid, vit_mae, vit_msn, vivit, wavaveca, wavaveca_conformer, wavaveca_phoneme, wavaveca_with_lm, wavlm, whisper, x_clip, xglm, xlm, xlm_prophetnet, xlm_roberta, xlm_roberta_xl, xlnet, xmod, yolos, yoso, )
362
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
320
0
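The image-processor record above applies resize, center crop, rescale, and normalize in a fixed order before returning channels-first pixel values. A rough numpy/PIL sketch of that chain, collapsing the shortest-edge resize and center crop into a single 224x224 resize for brevity; the mean/std values are the commonly quoted CLIP normalization constants, not taken from this record:

import numpy as np
from PIL import Image

MEAN = np.array([0.48145466, 0.4578275, 0.40821073])  # CLIP image mean
STD = np.array([0.26862954, 0.26130258, 0.27577711])  # CLIP image std


def preprocess(img: Image.Image) -> np.ndarray:
    # Same order as the processor: RGB -> resize/crop -> rescale to [0, 1]
    # -> per-channel normalize -> HWC to CHW.
    img = img.convert("RGB").resize((224, 224), Image.BICUBIC)
    x = np.asarray(img).astype(np.float32) / 255.0  # rescale_factor = 1/255
    x = (x - MEAN) / STD
    return x.transpose(2, 0, 1)


x = preprocess(Image.new("RGB", (640, 480)))
print(x.shape)  # (3, 224, 224)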
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Any = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : int = ['''FNetTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['''FNetTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = [ '''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FNetForMaskedLM''', '''FNetForMultipleChoice''', '''FNetForNextSentencePrediction''', '''FNetForPreTraining''', '''FNetForQuestionAnswering''', '''FNetForSequenceClassification''', '''FNetForTokenClassification''', '''FNetLayer''', '''FNetModel''', '''FNetPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
363
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
0
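The polynomial record above evaluates sum(c_i * x**i) both by direct powers and by Horner's rule, which needs only one multiplication per coefficient. A worked sketch of the Horner accumulation on the same inputs:

from collections.abc import Sequence


def horner(coeffs: Sequence[float], x: float) -> float:
    # Accumulate from the highest coefficient down:
    # for poly (0, 0, 5, 9.3, 7) and x = 10 the steps are
    # 0 -> 7 -> 79.3 -> 798 -> 7980 -> 79800.
    result = 0.0
    for c in reversed(coeffs):
        result = result * x + c
    return result


poly = (0.0, 0.0, 5.0, 9.3, 7.0)  # 5x^2 + 9.3x^3 + 7x^4
print(horner(poly, 10.0))  # ~79800.0, up to float rounding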
"""simple docstring""" import importlib.metadata import operator import re import sys from typing import Optional from packaging import version UpperCAmelCase : Tuple = { """<""": operator.lt, """<=""": operator.le, """==""": operator.eq, """!=""": operator.ne, """>=""": operator.ge, """>""": operator.gt, } def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Any , _UpperCamelCase : Any , _UpperCamelCase : Tuple , _UpperCamelCase : Dict , _UpperCamelCase : Tuple ) -> Optional[int]: '''simple docstring''' if got_ver is None or want_ver is None: raise ValueError( f'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider''' f''' reinstalling {pkg}.''' ) if not ops[op](version.parse(_UpperCamelCase ) , version.parse(_UpperCamelCase ) ): raise ImportError( f'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[str] = None ) -> None: '''simple docstring''' __UpperCAmelCase : List[str] = f'''\n{hint}''' if hint is not None else """""" # non-versioned check if re.match(R"""^[\w_\-\d]+$""" , _UpperCamelCase ): __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : int = requirement, None, None else: __UpperCAmelCase : Union[str, Any] = re.findall(R"""^([^!=<>\s]+)([\s!=<>]{1,2}.+)""" , _UpperCamelCase ) if not match: raise ValueError( """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but""" f''' got {requirement}''' ) __UpperCAmelCase ,__UpperCAmelCase : List[Any] = match[0] __UpperCAmelCase : Optional[Any] = want_full.split(""",""" ) # there could be multiple requirements __UpperCAmelCase : Dict = {} for w in want_range: __UpperCAmelCase : int = re.findall(R"""^([\s!=<>]{1,2})(.+)""" , _UpperCamelCase ) if not match: raise ValueError( """requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,""" f''' but got {requirement}''' ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = match[0] __UpperCAmelCase : List[str] = want_ver if op not in ops: raise ValueError(f'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' ) # special case if pkg == "python": __UpperCAmelCase : int = """.""".join([str(_UpperCamelCase ) for x in sys.version_info[:3]] ) for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) return # check if any version is installed try: __UpperCAmelCase : List[Any] = importlib.metadata.version(_UpperCamelCase ) except importlib.metadata.PackageNotFoundError: raise importlib.metadata.PackageNotFoundError( f'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' ) # check that the right version is installed if version number or a range was provided if want_ver is not None: for op, want_ver in wanted.items(): _compare_versions(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) def lowerCamelCase ( _UpperCamelCase : Dict ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[Any] = """Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main""" return require_version(_UpperCamelCase , _UpperCamelCase )
364
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
0
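The requirement-checker record above parses pip-style specifiers and compares installed versions via packaging.version, which gives PEP 440-aware ordering (so "4.10.0" correctly sorts above "4.9.2", unlike plain string comparison). A minimal sketch of the comparison core:

import operator

from packaging import version

OPS = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
       "!=": operator.ne, ">=": operator.ge, ">": operator.gt}


def satisfies(got: str, op: str, want: str) -> bool:
    # Parse both sides into Version objects, then apply the operator.
    return OPS[op](version.parse(got), version.parse(want))


print(satisfies("4.10.0", ">=", "4.9.2"))  # True
print(satisfies("4.10.0", "==", "4.9.2"))  # False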
"""simple docstring""" import argparse import math import traceback import dateutil.parser as date_parser import requests def lowerCamelCase ( _UpperCamelCase : Tuple ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = {} __UpperCAmelCase : str = job["""started_at"""] __UpperCAmelCase : str = job["""completed_at"""] __UpperCAmelCase : Tuple = date_parser.parse(snake_case__ ) __UpperCAmelCase : int = date_parser.parse(snake_case__ ) __UpperCAmelCase : Any = round((end_datetime - start_datetime).total_seconds() / 60.0 ) __UpperCAmelCase : Any = start __UpperCAmelCase : Dict = end __UpperCAmelCase : str = duration_in_min return job_info def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Union[str, Any]=None ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : List[Any] = None if token is not None: __UpperCAmelCase : Any = {"""Accept""": """application/vnd.github+json""", """Authorization""": f'''Bearer {token}'''} __UpperCAmelCase : int = f'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100''' __UpperCAmelCase : Dict = requests.get(snake_case__ , headers=snake_case__ ).json() __UpperCAmelCase : Dict = {} try: job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} ) __UpperCAmelCase : Union[str, Any] = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 ) for i in range(snake_case__ ): __UpperCAmelCase : List[str] = requests.get(url + f'''&page={i + 2}''' , headers=snake_case__ ).json() job_time.update({job["""name"""]: extract_time_from_single_job(snake_case__ ) for job in result["""jobs"""]} ) return job_time except Exception: print(f'''Unknown error, could not fetch links:\n{traceback.format_exc()}''' ) return {} if __name__ == "__main__": UpperCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.') UpperCAmelCase : Any = parser.parse_args() UpperCAmelCase : Optional[int] = get_job_time(args.workflow_run_id) UpperCAmelCase : List[str] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True)) for k, v in job_time.items(): print(F"{k}: {v['duration']}")
365
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
0
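A quick offline sanity check for the GitHub Actions job-timing helpers above (extract_time_from_single_job / get_job_time): the payload below is a hypothetical stand-in for one entry of the API's "jobs" list, so no network access or token is needed.

fake_job = {"started_at": "2023-01-01T10:00:00Z", "completed_at": "2023-01-01T10:42:30Z"}
info = extract_time_from_single_job(fake_job)
assert info["started_at"] == fake_job["started_at"]
assert info["duration"] == 42  # 42.5 minutes; Python's round() ties to the even value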
"""simple docstring""" from collections import defaultdict class lowerCamelCase__ : """simple docstring""" def __init__( self : List[Any] , UpperCamelCase : int , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 __UpperCAmelCase : List[Any] = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(SCREAMING_SNAKE_CASE_ ) ) ] __UpperCAmelCase : Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE_ ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 __UpperCAmelCase : List[Any] = (1 << len(SCREAMING_SNAKE_CASE_ )) - 1 def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] ): '''simple docstring''' if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement __UpperCAmelCase : List[Any] = self.count_ways_until(SCREAMING_SNAKE_CASE_ , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. __UpperCAmelCase : List[Any] = total_ways_util return self.dp[mask][task_no] def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int ): '''simple docstring''' for i in range(len(SCREAMING_SNAKE_CASE_ ) ): for j in task_performed[i]: self.task[j].append(SCREAMING_SNAKE_CASE_ ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": UpperCAmelCase : Optional[Any] = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. UpperCAmelCase : Optional[Any] = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
366
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
0
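A hand-checkable instance for the AssignmentUsingBitmask DP above. With two persons and tasks numbered 1..3, person 0 able to do tasks {1, 2} and person 1 only task {2}, the single complete assignment is person 0 -> task 1, person 1 -> task 2 (person 0 taking task 2 would leave person 1 idle):

task_performed = [[1, 2], [2]]
assert AssignmentUsingBitmask(task_performed, 3).count_no_of_ways(task_performed) == 1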
"""simple docstring""" UpperCAmelCase : int = { 'meter': 'm', 'kilometer': 'km', 'megametre': 'Mm', 'gigametre': 'Gm', 'terametre': 'Tm', 'petametre': 'Pm', 'exametre': 'Em', 'zettametre': 'Zm', 'yottametre': 'Ym', } # Exponent of the factor(meter) UpperCAmelCase : Optional[int] = { 'm': 0, 'km': 3, 'Mm': 6, 'Gm': 9, 'Tm': 12, 'Pm': 15, 'Em': 18, 'Zm': 21, 'Ym': 24, } def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] ) -> float: '''simple docstring''' __UpperCAmelCase : str = from_type.lower().strip("""s""" ) __UpperCAmelCase : Tuple = to_type.lower().strip("""s""" ) __UpperCAmelCase : Dict = UNIT_SYMBOL.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) __UpperCAmelCase : Tuple = UNIT_SYMBOL.get(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) if from_sanitized not in METRIC_CONVERSION: __UpperCAmelCase : List[Any] = ( f'''Invalid \'from_type\' value: {from_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(SCREAMING_SNAKE_CASE__ )}''' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) if to_sanitized not in METRIC_CONVERSION: __UpperCAmelCase : Optional[Any] = ( f'''Invalid \'to_type\' value: {to_type!r}.\n''' f'''Conversion abbreviations are: {", ".join(SCREAMING_SNAKE_CASE__ )}''' ) raise ValueError(SCREAMING_SNAKE_CASE__ ) __UpperCAmelCase : str = METRIC_CONVERSION[from_sanitized] __UpperCAmelCase : List[str] = METRIC_CONVERSION[to_sanitized] __UpperCAmelCase : Tuple = 1 if from_exponent > to_exponent: __UpperCAmelCase : str = from_exponent - to_exponent else: __UpperCAmelCase : Optional[int] = -(to_exponent - from_exponent) return value * pow(1_0 , SCREAMING_SNAKE_CASE__ ) if __name__ == "__main__": from doctest import testmod testmod()
367
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
0
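Two quick self-checks for the conversion modules above, assuming the reconstructed names length_conversion, base64_encode and base64_decode: a metric conversion with a known answer, and a base64 round-trip cross-checked against the standard library.

import base64

assert length_conversion(4, "kilometer", "meter") == 4000
assert base64_encode(b"Hello World!") == base64.b64encode(b"Hello World!")
assert base64_decode(base64_encode(b"some bytes")) == b"some bytes"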
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( _a , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', ] __UpperCAmelCase : Union[str, Any] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] __UpperCAmelCase : Any = {'unk_token': '<unk>'} __UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Optional[Any] , **UpperCamelCase : List[Any] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : Tuple ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __UpperCAmelCase : Optional[int] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Dict = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Any = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(UpperCamelCase , padding=UpperCamelCase , return_tensors="""pt""" ) 
self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Dict = [ 'Summary of the text.', 'Another summary.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[str] = ['A long paragraph for summarization.'] __UpperCAmelCase : Dict = [ 'Summary of the text.', ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : int = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Dict = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : str = inputs['input_ids'] __UpperCAmelCase : Optional[int] = targets['input_ids'] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : str ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Union[str, Any] = ['Summary of the text.', 'Another summary.'] __UpperCAmelCase : List[Any] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : str = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : List[Any] = [[0] * len(UpperCamelCase ) for x in encoded_output['input_ids']] __UpperCAmelCase : Optional[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Dict = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : int = 'A, <mask> AllenNLP sentence.' 
__UpperCAmelCase : int = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : List[Any] = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Any = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
368
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
0
"""simple docstring""" import numpy as np import torch import torch.nn as nn from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel from ...utils import logging UpperCAmelCase : int = logging.get_logger(__name__) class lowerCamelCase__ ( __a ): """simple docstring""" __a = CLIPConfig __a = ["""CLIPEncoderLayer"""] def __init__( self : Dict , UpperCamelCase : CLIPConfig ): '''simple docstring''' super().__init__(UpperCamelCase__ ) __UpperCAmelCase : Union[str, Any] = CLIPVisionModelWithProjection(config.vision_config ) __UpperCAmelCase : List[str] = nn.Linear(config.vision_config.projection_dim , 1 ) __UpperCAmelCase : Any = nn.Linear(config.vision_config.projection_dim , 1 ) @torch.no_grad() def lowerCamelCase__ ( self : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Union[str, Any]=0.5 , UpperCamelCase : Tuple=0.5 ): '''simple docstring''' __UpperCAmelCase : Tuple = self.vision_model(UpperCamelCase__ )[0] __UpperCAmelCase : List[Any] = self.p_head(UpperCamelCase__ ) __UpperCAmelCase : str = nsfw_detected.flatten() __UpperCAmelCase : List[str] = nsfw_detected > p_threshold __UpperCAmelCase : List[str] = nsfw_detected.tolist() if any(UpperCamelCase__ ): logger.warning( """Potential NSFW content was detected in one or more images. A black image will be returned instead.""" """ Try again with a different prompt and/or seed.""" ) for idx, nsfw_detected_ in enumerate(UpperCamelCase__ ): if nsfw_detected_: __UpperCAmelCase : List[str] = np.zeros(images[idx].shape ) __UpperCAmelCase : List[Any] = self.w_head(UpperCamelCase__ ) __UpperCAmelCase : List[str] = watermark_detected.flatten() __UpperCAmelCase : Union[str, Any] = watermark_detected > w_threshold __UpperCAmelCase : List[str] = watermark_detected.tolist() if any(UpperCamelCase__ ): logger.warning( """Potential watermarked content was detected in one or more images. A black image will be returned instead.""" """ Try again with a different prompt and/or seed.""" ) for idx, watermark_detected_ in enumerate(UpperCamelCase__ ): if watermark_detected_: __UpperCAmelCase : Tuple = np.zeros(images[idx].shape ) return images, nsfw_detected, watermark_detected
369
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
0
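A minimal smoke test for the IFSafetyChecker above. The config sizes below are arbitrary toy values (not any released checkpoint), chosen only so the tiny CLIP vision tower builds quickly; each head then emits one score per image.

import numpy as np
import torch
from transformers import CLIPConfig

tiny_config = CLIPConfig(
    vision_config={
        "hidden_size": 32,
        "intermediate_size": 64,
        "num_hidden_layers": 2,
        "num_attention_heads": 2,
        "image_size": 30,
        "patch_size": 15,
        "projection_dim": 16,
    }
)
checker = IFSafetyChecker(tiny_config).eval()
images = np.random.rand(2, 30, 30, 3)   # two dummy images in numpy HWC format
clip_input = torch.randn(2, 3, 30, 30)  # matching dummy CLIP pixel values
images, nsfw, watermark = checker(clip_input, images)
print(nsfw, watermark)  # one boolean per image for each head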
"""simple docstring""" from __future__ import annotations import bisect def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : int = 0 , _UpperCamelCase : int = -1 ) -> Tuple: '''simple docstring''' if hi < 0: __UpperCAmelCase : List[str] = len(snake_case_ ) while lo < hi: __UpperCAmelCase : int = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __UpperCAmelCase : Optional[Any] = mid + 1 else: __UpperCAmelCase : Optional[int] = mid return lo def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : int = 0 , _UpperCamelCase : int = -1 ) -> Tuple: '''simple docstring''' if hi < 0: __UpperCAmelCase : Dict = len(snake_case_ ) while lo < hi: __UpperCAmelCase : int = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __UpperCAmelCase : List[str] = mid + 1 else: __UpperCAmelCase : Any = mid return lo def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : int = 0 , _UpperCamelCase : int = -1 ) -> List[str]: '''simple docstring''' sorted_collection.insert(bisect_left(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ) def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : int = 0 , _UpperCamelCase : int = -1 ) -> List[str]: '''simple docstring''' sorted_collection.insert(bisect_right(snake_case_ , snake_case_ , snake_case_ , snake_case_ ) , snake_case_ ) def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Any = 0 __UpperCAmelCase : List[Any] = len(snake_case_ ) - 1 while left <= right: __UpperCAmelCase : Optional[int] = left + (right - left) // 2 __UpperCAmelCase : List[str] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __UpperCAmelCase : Any = midpoint - 1 else: __UpperCAmelCase : Dict = midpoint + 1 return None def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Tuple = bisect.bisect_left(snake_case_ , snake_case_ ) if index != len(snake_case_ ) and sorted_collection[index] == item: return index return None def lowerCamelCase ( _UpperCamelCase : list[int] , _UpperCamelCase : int , _UpperCamelCase : int , _UpperCamelCase : int ) -> Union[str, Any]: '''simple docstring''' if right < left: return None __UpperCAmelCase : str = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(snake_case_ , snake_case_ , snake_case_ , midpoint - 1 ) else: return binary_search_by_recursion(snake_case_ , snake_case_ , midpoint + 1 , snake_case_ ) if __name__ == "__main__": UpperCAmelCase : Optional[Any] = input('Enter numbers separated by comma:\n').strip() UpperCAmelCase : Optional[int] = sorted(int(item) for item in user_input.split(',')) UpperCAmelCase : Optional[Any] = int(input('Enter a single number to be found in the list:\n')) UpperCAmelCase : int = binary_search(collection, target) if result is None: print(F"{target} was not found in {collection}.") else: print(F"{target} was found at position {result} in {collection}.")
370
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
0
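Invariant checks for the bisection helpers above: for an absent item both bounds agree on the insertion point, while for a present item bisect_left and bisect_right straddle it.

data = [0, 5, 7, 10, 15]
assert bisect_left(data, 6) == bisect_right(data, 6) == 2
assert bisect_left(data, 5) == 1 and bisect_right(data, 5) == 2
assert binary_search(data, 15) == 4
assert binary_search_by_recursion(data, 3, 0, len(data) - 1) is None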
"""simple docstring""" import os from collections import namedtuple import pytest from datasets import ClassLabel, Features, Sequence, Value from datasets.commands.test import TestCommand from datasets.info import DatasetInfo, DatasetInfosDict UpperCAmelCase : Dict = namedtuple( '_TestCommandArgs', [ 'dataset', 'name', 'cache_dir', 'data_dir', 'all_configs', 'save_infos', 'ignore_verifications', 'force_redownload', 'clear_cache', ], defaults=[None, None, None, False, False, False, False, False], ) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : int ) -> List[Any]: '''simple docstring''' return (abs(source - target ) / target) < 0.01 @pytest.mark.integration def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Tuple = _TestCommandArgs(dataset=a__ , all_configs=a__ , save_infos=a__ ) __UpperCAmelCase : Optional[int] = TestCommand(*a__ ) test_command.run() __UpperCAmelCase : Optional[int] = os.path.join(a__ , """README.md""" ) assert os.path.exists(a__ ) __UpperCAmelCase : str = DatasetInfosDict.from_directory(a__ ) __UpperCAmelCase : Union[str, Any] = DatasetInfosDict( { """default""": DatasetInfo( features=Features( { """tokens""": Sequence(Value("""string""" ) ), """ner_tags""": Sequence( ClassLabel(names=["""O""", """B-PER""", """I-PER""", """B-ORG""", """I-ORG""", """B-LOC""", """I-LOC"""] ) ), """langs""": Sequence(Value("""string""" ) ), """spans""": Sequence(Value("""string""" ) ), } ) , splits=[ { """name""": """train""", """num_bytes""": 2_3_5_1_5_6_3, """num_examples""": 1_0_0_0_0, }, { """name""": """validation""", """num_bytes""": 2_3_8_4_1_8, """num_examples""": 1_0_0_0, }, ] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , ) } ) assert dataset_infos.keys() == expected_dataset_infos.keys() for key in DatasetInfo._INCLUDED_INFO_IN_YAML: __UpperCAmelCase ,__UpperCAmelCase : List[str] = getattr(dataset_infos["""default"""] , a__ ), getattr(expected_dataset_infos["""default"""] , a__ ) if key == "num_bytes": assert is_apercent_close(a__ , a__ ) elif key == "splits": assert list(a__ ) == list(a__ ) for split in result: assert result[split].name == expected[split].name assert result[split].num_examples == expected[split].num_examples assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes ) else: result == expected
371
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
0
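A randomized cross-check of the two bitwise-multiplication routines above against Python's built-in operators; this relies only on the functions as reconstructed.

import random

for _ in range(100):
    a, b = random.randint(0, 10**6), random.randint(0, 10**6)
    c = random.randint(1, 10**6)
    assert binary_multiply(a, b) == a * b
    assert binary_mod_multiply(a, b, c) == (a * b) % c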
"""simple docstring""" import numpy as np def lowerCamelCase ( _UpperCamelCase : Any ) -> Optional[int]: '''simple docstring''' return 1 / (1 + np.exp(-vector )) def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> int: '''simple docstring''' return vector * sigmoid(1.702 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
350
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
0
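To see how tight the 1.702-sigmoid fit in gaussian_error_linear_unit above is, compare it with the exact GELU x * Phi(x) built from math.erf; over [-3, 3] the deviation stays on the order of 10^-2.

import math

import numpy as np

x = np.linspace(-3, 3, 601)
exact_gelu = x * 0.5 * (1 + np.vectorize(math.erf)(x / math.sqrt(2)))
max_err = np.max(np.abs(exact_gelu - gaussian_error_linear_unit(x)))
print(max_err)  # roughly 0.02 at its worst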