Dataset schema:

| Column                  | Type   | Range              |
|-------------------------|--------|--------------------|
| code                    | string | lengths 87 – 55.2k |
| code_codestyle          | int64  | 0 – 349            |
| style_context           | string | lengths 135 – 49.1k|
| style_context_codestyle | int64  | 0 – 349            |
| label                   | int64  | 0 – 1              |

Sample rows (line breaks and indentation restored in the code fields):
Row 1

code (code_codestyle = 186):

```python
from __future__ import annotations

def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
    return len(set(SCREAMING_SNAKE_CASE ) ) == len(SCREAMING_SNAKE_CASE )

if __name__ == "__main__":
    import doctest

    doctest.testmod()
```

style_context (style_context_codestyle = 85):

```python
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

_SCREAMING_SNAKE_CASE : List[str] = {
    "configuration_cpmant": ["CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CpmAntConfig"],
    "tokenization_cpmant": ["CpmAntTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : Any = [
        "CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CpmAntForCausalLM",
        "CpmAntModel",
        "CpmAntPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
    from .tokenization_cpmant import CpmAntTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_cpmant import (
            CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
            CpmAntForCausalLM,
            CpmAntModel,
            CpmAntPreTrainedModel,
        )
else:
    import sys

    _SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```

label: 0
Row 2

code (code_codestyle = 80):

```python
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
    from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
    from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
    from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
    from .text_encoder import MultilingualCLIP
```

style_context (style_context_codestyle = 85):

```python
'''simple docstring'''
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel
from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings
from diffusers.utils import load_numpy, slow, torch_device
from diffusers.utils.testing_utils import require_torch_gpu

_SCREAMING_SNAKE_CASE : Union[str, Any] = False

class _snake_case ( unittest.TestCase ):
    def lowerCAmelCase__ ( self ) -> int:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def lowerCAmelCase__ ( self ) -> List[Any]:
        '''simple docstring'''
        return 12

    @property
    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        return 12

    @property
    def lowerCAmelCase__ ( self ) -> str:
        '''simple docstring'''
        return 32

    @property
    def lowerCAmelCase__ ( self ) -> Optional[Any]:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ = VQModel(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 ,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] ,
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] ,
            latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , )
        return model

    @property
    def lowerCAmelCase__ ( self ) -> str:
        '''simple docstring'''
        snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
        return tokenizer

    @property
    def lowerCAmelCase__ ( self ) -> str:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size ,
            intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 ,
            num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , )
        return CLIPTextModel(a__ )

    @property
    def lowerCAmelCase__ ( self ) -> Optional[int]:
        '''simple docstring'''
        torch.manual_seed(0 )
        snake_case_ = 12
        snake_case_ = 12
        snake_case_ = {
            "attention_bias": True,
            "cross_attention_dim": 32,
            "attention_head_dim": height * width,
            "num_attention_heads": 1,
            "num_vector_embeds": self.num_embed,
            "num_embeds_ada_norm": self.num_embeds_ada_norm,
            "norm_num_groups": 32,
            "sample_size": width,
            "activation_fn": "geglu-approximate",
        }
        snake_case_ = TransformeraDModel(**a__ )
        return model

    def lowerCAmelCase__ ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case_ = "cpu"
        snake_case_ = self.dummy_vqvae
        snake_case_ = self.dummy_text_encoder
        snake_case_ = self.dummy_tokenizer
        snake_case_ = self.dummy_transformer
        snake_case_ = VQDiffusionScheduler(self.num_embed )
        snake_case_ = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ )
        snake_case_ = VQDiffusionPipeline(
            vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ ,
            scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
        snake_case_ = pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        snake_case_ = "teddy bear playing in the pool"
        snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
        snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
        snake_case_ = output.images
        snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
        snake_case_ = pipe(
            [prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
        snake_case_ = image[0, -3:, -3:, -1]
        snake_case_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        snake_case_ = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    def lowerCAmelCase__ ( self ) -> Dict:
        '''simple docstring'''
        snake_case_ = "cpu"
        snake_case_ = self.dummy_vqvae
        snake_case_ = self.dummy_text_encoder
        snake_case_ = self.dummy_tokenizer
        snake_case_ = self.dummy_transformer
        snake_case_ = VQDiffusionScheduler(self.num_embed )
        snake_case_ = LearnedClassifierFreeSamplingEmbeddings(
            learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length )
        snake_case_ = VQDiffusionPipeline(
            vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ ,
            scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , )
        snake_case_ = pipe.to(a__ )
        pipe.set_progress_bar_config(disable=a__ )
        snake_case_ = "teddy bear playing in the pool"
        snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
        snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" )
        snake_case_ = output.images
        snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
        snake_case_ = pipe(
            [prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0]
        snake_case_ = image[0, -3:, -3:, -1]
        snake_case_ = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 24, 24, 3)
        snake_case_ = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

@slow
@require_torch_gpu
class _snake_case ( unittest.TestCase ):
    def lowerCAmelCase__ ( self ) -> Optional[int]:
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def lowerCAmelCase__ ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case_ = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" )
        snake_case_ = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" )
        snake_case_ = pipeline.to(a__ )
        pipeline.set_progress_bar_config(disable=a__ )
        # requires GPU generator for gumbel softmax
        # don't use GPU generator in tests though
        snake_case_ = torch.Generator(device=a__ ).manual_seed(0 )
        snake_case_ = pipeline(
            "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=a__ , output_type="np" , )
        snake_case_ = output.images[0]
        assert image.shape == (256, 256, 3)
        assert np.abs(expected_image - image ).max() < 2.0
```

label: 0
Row 3

code (code_codestyle = 345):

```python
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
UpperCamelCase_ = 299792458

# Symbols
UpperCamelCase_ = symbols('''ct x y z''')

def lowerCamelCase_ ( _a : float ):
    '''simple docstring'''
    if velocity > c:
        raise ValueError("""Speed must not exceed light speed 299,792,458 [m/s]!""" )
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("""Speed must be greater than or equal to 1!""" )
    return velocity / c

def lowerCamelCase_ ( _a : float ):
    '''simple docstring'''
    return 1 / sqrt(1 - beta(_a ) ** 2 )

def lowerCamelCase_ ( _a : float ):
    '''simple docstring'''
    return np.array(
        [
            [gamma(_a ), -gamma(_a ) * beta(_a ), 0, 0],
            [-gamma(_a ) * beta(_a ), gamma(_a ), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ] )

def lowerCamelCase_ ( _a : float , _a : np.ndarray | None = None ):
    '''simple docstring'''
    if event is None:
        UpperCAmelCase_ : Tuple = np.array([ct, x, y, z] )  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(_a ) @ event

if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    UpperCamelCase_ = transform(29979245)
    print('''Example of four vector: ''')
    print(F"ct' = {four_vector[0]}")
    print(F"x' = {four_vector[1]}")
    print(F"y' = {four_vector[2]}")
    print(F"z' = {four_vector[3]}")
    # Substitute symbols with numerical values
    UpperCamelCase_ = {ct: c, x: 1, y: 1, z: 1}
    UpperCamelCase_ = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(F"\n{numerical_vector}")
```

style_context (style_context_codestyle = 85):

```python
'''simple docstring'''
from statistics import mean, stdev

def UpperCamelCase_( snake_case : list , snake_case : int = 3 ):
    '''simple docstring'''
    snake_case_ = min(snake_case )
    snake_case_ = max(snake_case )
    # normalize data
    return [round((x - x_min) / (x_max - x_min) , snake_case ) for x in data]

def UpperCamelCase_( snake_case : list , snake_case : int = 3 ):
    '''simple docstring'''
    snake_case_ = mean(snake_case )
    snake_case_ = stdev(snake_case )
    # standardize data
    return [round((x - mu) / (sigma) , snake_case ) for x in data]
```

label: 0
Row 4

code (code_codestyle = 58):

```python
'''simple docstring'''
from __future__ import annotations

from math import pi
from typing import Protocol

import matplotlib.pyplot as plt
import numpy as np

class a_ ( lowercase_ ):
    '''simple docstring'''

    def snake_case_( self , A ) -> float:
        return 0.0

def lowerCamelCase ( __lowerCamelCase : np.ndarray , __lowerCamelCase : int ) ->Dict:
    _SCREAMING_SNAKE_CASE = min([-20, np.min(fft_results[1 : samplerate // 2 - 1] )] )
    _SCREAMING_SNAKE_CASE = max([20, np.max(fft_results[1 : samplerate // 2 - 1] )] )
    return lowest, highest

def lowerCamelCase ( __lowerCamelCase : FilterType , __lowerCamelCase : int ) ->Tuple:
    _SCREAMING_SNAKE_CASE = 512
    _SCREAMING_SNAKE_CASE = [1] + [0] * (size - 1)
    _SCREAMING_SNAKE_CASE = [filter_type.process(__lowerCamelCase ) for item in inputs]
    _SCREAMING_SNAKE_CASE = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    _SCREAMING_SNAKE_CASE = np.abs(np.fft.fft(__lowerCamelCase ) )
    _SCREAMING_SNAKE_CASE = 20 * np.logaa(__lowerCamelCase )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    # Display within reasonable bounds
    _SCREAMING_SNAKE_CASE = get_bounds(__lowerCamelCase , __lowerCamelCase )
    plt.ylim(max([-80, bounds[0]] ) , min([80, bounds[1]] ) )
    plt.ylabel("""Gain (dB)""" )
    plt.plot(__lowerCamelCase )
    plt.show()

def lowerCamelCase ( __lowerCamelCase : FilterType , __lowerCamelCase : int ) ->Dict:
    _SCREAMING_SNAKE_CASE = 512
    _SCREAMING_SNAKE_CASE = [1] + [0] * (size - 1)
    _SCREAMING_SNAKE_CASE = [filter_type.process(__lowerCamelCase ) for item in inputs]
    _SCREAMING_SNAKE_CASE = [0] * (samplerate - size)  # zero-padding
    outputs += filler
    _SCREAMING_SNAKE_CASE = np.angle(np.fft.fft(__lowerCamelCase ) )
    # Frequencies on log scale from 24 to nyquist frequency
    plt.xlim(24 , samplerate / 2 - 1 )
    plt.xlabel("""Frequency (Hz)""" )
    plt.xscale("""log""" )
    plt.ylim(-2 * pi , 2 * pi )
    plt.ylabel("""Phase shift (Radians)""" )
    plt.plot(np.unwrap(__lowerCamelCase , -2 * pi ) )
    plt.show()
```

style_context (style_context_codestyle = 85):

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_SCREAMING_SNAKE_CASE : Optional[Any] = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : List[Any] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : str = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : List[str] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : Optional[Any] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : Union[str, Any] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )
else:
    import sys

    _SCREAMING_SNAKE_CASE : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure)
```

label: 0
Row 5

code (code_codestyle = 147):

```python
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

a : Tuple = logging.get_logger(__name__)

if is_vision_available():
    import PIL

class _a ( lowercase_ ):
    A = ["pixel_values"]

    def __init__(self, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = 1 / 255, SCREAMING_SNAKE_CASE_ = True, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = True, **SCREAMING_SNAKE_CASE_, ) -> None:
        super().__init__(**a__ )
        UpperCAmelCase_: List[Any] = size if size is not None else {"""shortest_edge""": 224}
        UpperCAmelCase_: Optional[int] = get_size_dict(a__, default_to_square=a__ )
        UpperCAmelCase_: str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        UpperCAmelCase_: List[Any] = get_size_dict(a__, default_to_square=a__, param_name="""crop_size""" )
        UpperCAmelCase_: Tuple = do_resize
        UpperCAmelCase_: List[Any] = size
        UpperCAmelCase_: Optional[Any] = resample
        UpperCAmelCase_: int = do_center_crop
        UpperCAmelCase_: Dict = crop_size
        UpperCAmelCase_: int = do_rescale
        UpperCAmelCase_: Dict = rescale_factor
        UpperCAmelCase_: Optional[int] = do_normalize
        UpperCAmelCase_: List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        UpperCAmelCase_: int = image_std if image_std is not None else OPENAI_CLIP_STD
        UpperCAmelCase_: str = do_convert_rgb

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = PILImageResampling.BICUBIC, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
        UpperCAmelCase_: Any = get_size_dict(a__, default_to_square=a__ )
        if "shortest_edge" not in size:
            raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
        UpperCAmelCase_: str = get_resize_output_image_size(a__, size=size["""shortest_edge"""], default_to_square=a__ )
        return resize(a__, size=a__, resample=a__, data_format=a__, **a__ )

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
        UpperCAmelCase_: str = get_size_dict(a__ )
        if "height" not in size or "width" not in size:
            raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
        return center_crop(a__, size=(size["""height"""], size["""width"""]), data_format=a__, **a__ )

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> Tuple:
        return rescale(a__, scale=a__, data_format=a__, **a__ )

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, **SCREAMING_SNAKE_CASE_, ) -> np.ndarray:
        return normalize(a__, mean=a__, std=a__, data_format=a__, **a__ )

    def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = None, SCREAMING_SNAKE_CASE_ = ChannelDimension.FIRST, **SCREAMING_SNAKE_CASE_, ) -> PIL.Image.Image:
        UpperCAmelCase_: int = do_resize if do_resize is not None else self.do_resize
        UpperCAmelCase_: Optional[int] = size if size is not None else self.size
        UpperCAmelCase_: Any = get_size_dict(a__, param_name="""size""", default_to_square=a__ )
        UpperCAmelCase_: str = resample if resample is not None else self.resample
        UpperCAmelCase_: List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
        UpperCAmelCase_: List[Any] = crop_size if crop_size is not None else self.crop_size
        UpperCAmelCase_: Optional[int] = get_size_dict(a__, param_name="""crop_size""", default_to_square=a__ )
        UpperCAmelCase_: Any = do_rescale if do_rescale is not None else self.do_rescale
        UpperCAmelCase_: Any = rescale_factor if rescale_factor is not None else self.rescale_factor
        UpperCAmelCase_: Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize
        UpperCAmelCase_: int = image_mean if image_mean is not None else self.image_mean
        UpperCAmelCase_: List[str] = image_std if image_std is not None else self.image_std
        UpperCAmelCase_: Optional[int] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        UpperCAmelCase_: Optional[int] = make_list_of_images(a__ )
        if not valid_images(a__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            UpperCAmelCase_: str = [convert_to_rgb(a__ ) for image in images]
        # All transformations expect numpy arrays.
        UpperCAmelCase_: Tuple = [to_numpy_array(a__ ) for image in images]
        if do_resize:
            UpperCAmelCase_: Tuple = [self.resize(image=a__, size=a__, resample=a__ ) for image in images]
        if do_center_crop:
            UpperCAmelCase_: Optional[Any] = [self.center_crop(image=a__, size=a__ ) for image in images]
        if do_rescale:
            UpperCAmelCase_: List[str] = [self.rescale(image=a__, scale=a__ ) for image in images]
        if do_normalize:
            UpperCAmelCase_: int = [self.normalize(image=a__, mean=a__, std=a__ ) for image in images]
        UpperCAmelCase_: Optional[int] = [to_channel_dimension_format(a__, a__ ) for image in images]
        UpperCAmelCase_: Optional[int] = {"""pixel_values""": images}
        return BatchFeature(data=a__, tensor_type=a__ )
```

style_context (style_context_codestyle = 85):

```python
'''simple docstring'''
def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[int] ):
    '''simple docstring'''
    snake_case_ = [0 for i in range(r + 1 )]
    # nc0 = 1
    snake_case_ = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        snake_case_ = min(snake_case , snake_case )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]

print(binomial_coefficient(n=10, r=5))
```

label: 0
Row 6

code (code_codestyle = 140):

```python
import operator as op

_UpperCAmelCase = "scaler.pt"
_UpperCAmelCase = "pytorch_model"
_UpperCAmelCase = "random_states"
_UpperCAmelCase = "optimizer"
_UpperCAmelCase = "scheduler"
_UpperCAmelCase = "pytorch_model.bin"
_UpperCAmelCase = "pytorch_model.bin.index.json"
_UpperCAmelCase = "model.safetensors"
_UpperCAmelCase = "model.safetensors.index.json"
_UpperCAmelCase = "1.10.2"
_UpperCAmelCase = "py38"
_UpperCAmelCase = "4.17.0"
_UpperCAmelCase = ["ml.p3.16xlarge", "ml.p3dn.24xlarge", "ml.p4dn.24xlarge"]
_UpperCAmelCase = ["FULL_SHARD", "SHARD_GRAD_OP", "NO_SHARD", "HYBRID_SHARD", "HYBRID_SHARD_ZERO2"]
_UpperCAmelCase = ["TRANSFORMER_BASED_WRAP", "SIZE_BASED_WRAP", "NO_WRAP"]
_UpperCAmelCase = ["BACKWARD_PRE", "BACKWARD_POST", "NO_PREFETCH"]
_UpperCAmelCase = ["FULL_STATE_DICT", "LOCAL_STATE_DICT", "SHARDED_STATE_DICT"]
_UpperCAmelCase = "2.0.1"
_UpperCAmelCase = ["pdsh", "standard", "openmpi", "mvapich"]
_UpperCAmelCase = ["default", "reduce-overhead", "max-autotune"]
_UpperCAmelCase = {">": op.gt, ">=": op.ge, "==": op.eq, "!=": op.ne, "<=": op.le, "<": op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
_UpperCAmelCase = [
    "nnodes",
    "nproc_per_node",
    "rdzv_backend",
    "rdzv_endpoint",
    "rdzv_id",
    "rdzv_conf",
    "standalone",
    "max_restarts",
    "monitor_interval",
    "start_method",
    "role",
    "module",
    "m",
    "no_python",
    "run_path",
    "log_dir",
    "r",
    "redirects",
    "t",
    "tee",
    "node_rank",
    "master_addr",
    "master_port",
]
_UpperCAmelCase = ["DEEPSPEED", "MULTI_GPU", "FSDP", "MEGATRON_LM"]
_UpperCAmelCase = ["DEEPSPEED", "MULTI_XPU", "FSDP"]
```

style_context (style_context_codestyle = 85):

```python
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_SCREAMING_SNAKE_CASE : Tuple = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : Union[str, Any] = ["ConditionalDetrFeatureExtractor"]
    _SCREAMING_SNAKE_CASE : List[Any] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _SCREAMING_SNAKE_CASE : Dict = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )
else:
    import sys

    _SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
```

label: 0
"""simple docstring""" from typing import Any, Dict, Optional import torch import torch.nn.functional as F from torch import nn from ..utils import maybe_allow_in_graph from .activations import get_activation from .attention_processor import Attention from .embeddings import CombinedTimestepLabelEmbeddings @maybe_allow_in_graph class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Dict , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Any=0.0 , lowercase_ : Union[str, Any] = None , lowercase_ : List[Any] = "geglu" , lowercase_ : List[Any] = None , lowercase_ : Any = False , lowercase_ : int = False , lowercase_ : Optional[Any] = False , lowercase_ : Union[str, Any] = False , lowercase_ : Optional[Any] = True , lowercase_ : int = "layer_norm" , lowercase_ : List[Any] = False , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : Any = only_cross_attention SCREAMING_SNAKE_CASE_ : List[str] = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm_zero''' SCREAMING_SNAKE_CASE_ : Tuple = (num_embeds_ada_norm is not None) and norm_type == '''ada_norm''' if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None: raise ValueError( F'`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to' F' define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}.') # Define 3 blocks. Each block has its own normalization layer. # 1. Self-Attn if self.use_ada_layer_norm: SCREAMING_SNAKE_CASE_ : Tuple = AdaLayerNorm(a__ , a__) elif self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE_ : Tuple = AdaLayerNormZero(a__ , a__) else: SCREAMING_SNAKE_CASE_ : Any = nn.LayerNorm(a__ , elementwise_affine=a__) SCREAMING_SNAKE_CASE_ : List[str] = Attention( query_dim=a__ , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , cross_attention_dim=cross_attention_dim if only_cross_attention else None , upcast_attention=a__ , ) # 2. Cross-Attn if cross_attention_dim is not None or double_self_attention: # We currently only use AdaLayerNormZero for self attention where there will only be one attention block. # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during # the second cross attention block. SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( AdaLayerNorm(a__ , a__) if self.use_ada_layer_norm else nn.LayerNorm(a__ , elementwise_affine=a__) ) SCREAMING_SNAKE_CASE_ : Any = Attention( query_dim=a__ , cross_attention_dim=cross_attention_dim if not double_self_attention else None , heads=a__ , dim_head=a__ , dropout=a__ , bias=a__ , upcast_attention=a__ , ) # is self-attn if encoder_hidden_states is none else: SCREAMING_SNAKE_CASE_ : Dict = None SCREAMING_SNAKE_CASE_ : int = None # 3. 
Feed-forward SCREAMING_SNAKE_CASE_ : Optional[Any] = nn.LayerNorm(a__ , elementwise_affine=a__) SCREAMING_SNAKE_CASE_ : List[str] = FeedForward(a__ , dropout=a__ , activation_fn=a__ , final_dropout=a__) # let chunk size default to None SCREAMING_SNAKE_CASE_ : str = None SCREAMING_SNAKE_CASE_ : str = 0 def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Optional[Any] , lowercase_ : List[str]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : List[str] = chunk_size SCREAMING_SNAKE_CASE_ : int = dim def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : List[str] , lowercase_ : Optional[int] = None , lowercase_ : Union[str, Any] = None , lowercase_ : Any = None , lowercase_ : List[Any] = None , lowercase_ : List[Any] = None , lowercase_ : Any = None , ): '''simple docstring''' if self.use_ada_layer_norm: SCREAMING_SNAKE_CASE_ : List[Any] = self.norma(a__ , a__) elif self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Optional[Any] = self.norma( a__ , a__ , a__ , hidden_dtype=hidden_states.dtype) else: SCREAMING_SNAKE_CASE_ : List[Any] = self.norma(a__) SCREAMING_SNAKE_CASE_ : Dict = cross_attention_kwargs if cross_attention_kwargs is not None else {} SCREAMING_SNAKE_CASE_ : List[Any] = self.attna( a__ , encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None , attention_mask=a__ , **a__ , ) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE_ : List[Any] = gate_msa.unsqueeze(1) * attn_output SCREAMING_SNAKE_CASE_ : int = attn_output + hidden_states # 2. Cross-Attention if self.attna is not None: SCREAMING_SNAKE_CASE_ : Union[str, Any] = ( self.norma(a__ , a__) if self.use_ada_layer_norm else self.norma(a__) ) SCREAMING_SNAKE_CASE_ : int = self.attna( a__ , encoder_hidden_states=a__ , attention_mask=a__ , **a__ , ) SCREAMING_SNAKE_CASE_ : Optional[int] = attn_output + hidden_states # 3. Feed-forward SCREAMING_SNAKE_CASE_ : List[str] = self.norma(a__) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE_ : Optional[int] = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None] if self._chunk_size is not None: # "feed_forward_chunk_size" can be used to save memory if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0: raise ValueError( F'`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. 
Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`.') SCREAMING_SNAKE_CASE_ : str = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size SCREAMING_SNAKE_CASE_ : str = torch.cat( [self.ff(a__) for hid_slice in norm_hidden_states.chunk(a__ , dim=self._chunk_dim)] , dim=self._chunk_dim , ) else: SCREAMING_SNAKE_CASE_ : Optional[int] = self.ff(a__) if self.use_ada_layer_norm_zero: SCREAMING_SNAKE_CASE_ : Optional[Any] = gate_mlp.unsqueeze(1) * ff_output SCREAMING_SNAKE_CASE_ : Optional[Any] = ff_output + hidden_states return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[Any] , lowercase_ : Any , lowercase_ : int = None , lowercase_ : Optional[Any] = 4 , lowercase_ : Union[str, Any] = 0.0 , lowercase_ : List[Any] = "geglu" , lowercase_ : Dict = False , ): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : Dict = int(dim * mult) SCREAMING_SNAKE_CASE_ : Optional[Any] = dim_out if dim_out is not None else dim if activation_fn == "gelu": SCREAMING_SNAKE_CASE_ : List[Any] = GELU(a__ , a__) if activation_fn == "gelu-approximate": SCREAMING_SNAKE_CASE_ : int = GELU(a__ , a__ , approximate='''tanh''') elif activation_fn == "geglu": SCREAMING_SNAKE_CASE_ : int = GEGLU(a__ , a__) elif activation_fn == "geglu-approximate": SCREAMING_SNAKE_CASE_ : Dict = ApproximateGELU(a__ , a__) SCREAMING_SNAKE_CASE_ : List[str] = nn.ModuleList([]) # project in self.net.append(a__) # project dropout self.net.append(nn.Dropout(a__)) # project out self.net.append(nn.Linear(a__ , a__)) # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout if final_dropout: self.net.append(nn.Dropout(a__)) def _SCREAMING_SNAKE_CASE ( self : str , lowercase_ : Optional[Any]): '''simple docstring''' for module in self.net: SCREAMING_SNAKE_CASE_ : Any = module(a__) return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : int = "none"): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : Dict = nn.Linear(a__ , a__) SCREAMING_SNAKE_CASE_ : Tuple = approximate def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : Optional[Any]): '''simple docstring''' if gate.device.type != "mps": return F.gelu(a__ , approximate=self.approximate) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa) , approximate=self.approximate).to(dtype=gate.dtype) def _SCREAMING_SNAKE_CASE ( self : List[str] , lowercase_ : Tuple): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.proj(a__) SCREAMING_SNAKE_CASE_ : int = self.gelu(a__) return hidden_states class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : int , lowercase_ : int): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = nn.Linear(a__ , dim_out * 2) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Dict): '''simple docstring''' if gate.device.type != "mps": return F.gelu(a__) # mps: gelu is not implemented for float16 return F.gelu(gate.to(dtype=torch.floataa)).to(dtype=gate.dtype) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] , lowercase_ : str): '''simple docstring''' SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = self.proj(a__).chunk(2 , dim=-1) return hidden_states * self.gelu(a__) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : List[str] , 
lowercase_ : List[str] , lowercase_ : str): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = nn.Linear(a__ , a__) def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : int): '''simple docstring''' SCREAMING_SNAKE_CASE_ : int = self.proj(a__) return x * torch.sigmoid(1.7_02 * x) class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Dict): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : Any = nn.Embedding(a__ , a__) SCREAMING_SNAKE_CASE_ : Any = nn.SiLU() SCREAMING_SNAKE_CASE_ : Optional[int] = nn.Linear(a__ , embedding_dim * 2) SCREAMING_SNAKE_CASE_ : int = nn.LayerNorm(a__ , elementwise_affine=a__) def _SCREAMING_SNAKE_CASE ( self : List[Any] , lowercase_ : List[str] , lowercase_ : List[Any]): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Optional[int] = self.linear(self.silu(self.emb(a__))) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = torch.chunk(a__ , 2) SCREAMING_SNAKE_CASE_ : Tuple = self.norm(a__) * (1 + scale) + shift return x class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : int , lowercase_ : Optional[int] , lowercase_ : List[Any]): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : str = CombinedTimestepLabelEmbeddings(a__ , a__) SCREAMING_SNAKE_CASE_ : Tuple = nn.SiLU() SCREAMING_SNAKE_CASE_ : Tuple = nn.Linear(a__ , 6 * embedding_dim , bias=a__) SCREAMING_SNAKE_CASE_ : List[str] = nn.LayerNorm(a__ , elementwise_affine=a__ , eps=1e-6) def _SCREAMING_SNAKE_CASE ( self : Any , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Optional[Any] , lowercase_ : Any=None): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Dict = self.linear(self.silu(self.emb(a__ , a__ , hidden_dtype=a__))) SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = emb.chunk(6 , dim=1) SCREAMING_SNAKE_CASE_ : str = self.norm(a__) * (1 + scale_msa[:, None]) + shift_msa[:, None] return x, gate_msa, shift_mlp, scale_mlp, gate_mlp class lowerCAmelCase__ ( nn.Module ): '''simple docstring''' def __init__( self : Optional[int] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Union[str, Any] , lowercase_ : Tuple = None , lowercase_ : Dict = 1e-5): '''simple docstring''' super().__init__() SCREAMING_SNAKE_CASE_ : List[Any] = num_groups SCREAMING_SNAKE_CASE_ : Dict = eps if act_fn is None: SCREAMING_SNAKE_CASE_ : Dict = None else: SCREAMING_SNAKE_CASE_ : List[Any] = get_activation(a__) SCREAMING_SNAKE_CASE_ : Union[str, Any] = nn.Linear(a__ , out_dim * 2) def _SCREAMING_SNAKE_CASE ( self : Dict , lowercase_ : List[str] , lowercase_ : str): '''simple docstring''' if self.act: SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.act(a__) SCREAMING_SNAKE_CASE_ : List[Any] = self.linear(a__) SCREAMING_SNAKE_CASE_ : Optional[int] = emb[:, :, None, None] SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict = emb.chunk(2 , dim=1) SCREAMING_SNAKE_CASE_ : Union[str, Any] = F.group_norm(a__ , self.num_groups , eps=self.eps) SCREAMING_SNAKE_CASE_ : Optional[Any] = x * (1 + scale) + shift return x
91
'''simple docstring''' import argparse from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline if __name__ == "__main__": _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.") parser.add_argument( "--txt2img_unclip", default="kakaobrain/karlo-v1-alpha", type=str, required=False, help="The pretrained txt2img unclip.", ) _SCREAMING_SNAKE_CASE : int = parser.parse_args() _SCREAMING_SNAKE_CASE : List[Any] = UnCLIPPipeline.from_pretrained(args.txtaimg_unclip) _SCREAMING_SNAKE_CASE : Union[str, Any] = CLIPImageProcessor() _SCREAMING_SNAKE_CASE : str = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14") _SCREAMING_SNAKE_CASE : int = UnCLIPImageVariationPipeline( decoder=txtaimg.decoder, text_encoder=txtaimg.text_encoder, tokenizer=txtaimg.tokenizer, text_proj=txtaimg.text_proj, feature_extractor=feature_extractor, image_encoder=image_encoder, super_res_first=txtaimg.super_res_first, super_res_last=txtaimg.super_res_last, decoder_scheduler=txtaimg.decoder_scheduler, super_res_scheduler=txtaimg.super_res_scheduler, ) imgaimg.save_pretrained(args.dump_path)
85
0
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class A_ (unittest.TestCase ): def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = '''| <pad> <unk> <s> </s> a b c d e f g h i j k'''.split() UpperCAmelCase = dict(zip(a__ , range(len(a__ ) ) ) ) UpperCAmelCase = { '''unk_token''': '''<unk>''', '''bos_token''': '''<s>''', '''eos_token''': '''</s>''', } UpperCAmelCase = { '''feature_size''': 1, '''padding_value''': 0.0, '''sampling_rate''': 1_6_0_0_0, '''return_attention_mask''': False, '''do_normalize''': True, } UpperCAmelCase = tempfile.mkdtemp() UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) UpperCAmelCase = os.path.join(self.tmpdirname , a__ ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a__ ) + '''\n''' ) with open(self.feature_extraction_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(a__ ) + '''\n''' ) # load decoder from hub UpperCAmelCase = '''hf-internal-testing/ngram-beam-search-decoder''' def _lowercase ( self , **_A ): '''simple docstring''' UpperCAmelCase = self.add_kwargs_tokens_map.copy() kwargs.update(a__ ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **a__ ) def _lowercase ( self , **_A ): '''simple docstring''' return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **a__ ) def _lowercase ( self , **_A ): '''simple docstring''' return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **a__ ) def _lowercase ( self ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , a__ ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , a__ ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) 
self.assertIsInstance(processor.decoder , a__ ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(['''xx'''] ) with self.assertRaisesRegex(a__ , '''include''' ): WavaVecaProcessorWithLM( tokenizer=a__ , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = floats_list((3, 1_0_0_0) ) UpperCAmelCase = feature_extractor(a__ , return_tensors='''np''' ) UpperCAmelCase = processor(a__ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = '''This is a test string''' UpperCAmelCase = processor(text=a__ ) UpperCAmelCase = tokenizer(a__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self , _A=(2, 1_0, 1_6) , _A=7_7 ): '''simple docstring''' np.random.seed(a__ ) return np.random.rand(*a__ ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = self._get_dummy_logits(shape=(1_0, 1_6) , seed=1_3 ) UpperCAmelCase = processor.decode(a__ ) UpperCAmelCase = decoder.decode_beams(a__ )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual('''</s> <s> </s>''' , decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ['''fork'''], ['''spawn''']] ) def _lowercase ( self , _A ): '''simple docstring''' UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) 
if pool_context is None: UpperCAmelCase = processor.batch_decode(a__ ) else: with get_context(a__ ).Pool() as pool: UpperCAmelCase = processor.batch_decode(a__ , a__ ) UpperCAmelCase = list(a__ ) with get_context('''fork''' ).Pool() as p: UpperCAmelCase = decoder.decode_beams_batch(a__ , a__ ) UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(a__ , decoded_processor.text ) self.assertListEqual(['''<s> <s> </s>''', '''<s> <s> <s>'''] , decoded_processor.text ) self.assertListEqual(a__ , decoded_processor.logit_score ) self.assertListEqual(a__ , decoded_processor.lm_score ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = self._get_dummy_logits() UpperCAmelCase = 1_5 UpperCAmelCase = -2_0.0 UpperCAmelCase = -4.0 UpperCAmelCase = processor.batch_decode( a__ , beam_width=a__ , beam_prune_logp=a__ , token_min_logp=a__ , ) UpperCAmelCase = decoded_processor_out.text UpperCAmelCase = list(a__ ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase = decoder.decode_beams_batch( a__ , a__ , beam_width=a__ , beam_prune_logp=a__ , token_min_logp=a__ , ) UpperCAmelCase = [d[0][0] for d in decoded_decoder_out] UpperCAmelCase = [d[0][2] for d in decoded_decoder_out] UpperCAmelCase = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(a__ , a__ ) self.assertListEqual(['''</s> <s> <s>''', '''<s> <s> <s>'''] , a__ ) self.assertTrue(np.array_equal(a__ , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-2_0.0_5_4, -1_8.4_4_7] , a__ , atol=1E-3 ) ) self.assertTrue(np.array_equal(a__ , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-1_5.5_5_4, -1_3.9_4_7_4] , a__ , atol=1E-3 ) ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) UpperCAmelCase = self._get_dummy_logits() UpperCAmelCase = 2.0 UpperCAmelCase = 5.0 UpperCAmelCase = -2_0.0 UpperCAmelCase = True UpperCAmelCase = processor.batch_decode( a__ , alpha=a__ , beta=a__ , unk_score_offset=a__ , lm_score_boundary=a__ , ) UpperCAmelCase = decoded_processor_out.text UpperCAmelCase = list(a__ ) decoder.reset_params( alpha=a__ , beta=a__ , unk_score_offset=a__ , lm_score_boundary=a__ , ) with get_context('''fork''' ).Pool() as pool: UpperCAmelCase = decoder.decode_beams_batch( a__ , a__ , ) UpperCAmelCase = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(a__ , a__ ) self.assertListEqual(['''<s> </s> <s> </s> </s>''', '''</s> </s> <s> </s> </s>'''] , a__ ) UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -2_0.0 ) self.assertEqual(lm_model.score_boundary , a__ ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase = 
Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = ['''alphabet.json''', '''language_model'''] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) self.assertListEqual(a__ , a__ ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = snapshot_download('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained(a__ ) UpperCAmelCase = processor.decoder.model_container[processor.decoder._model_key] UpperCAmelCase = Path(language_model._kenlm_model.path.decode('''utf-8''' ) ).parent.parent.absolute() UpperCAmelCase = os.listdir(a__ ) UpperCAmelCase = os.listdir(a__ ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(a__ , a__ ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = AutoProcessor.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = floats_list((3, 1_0_0_0) ) UpperCAmelCase = processor_wavaveca(a__ , return_tensors='''np''' ) UpperCAmelCase = processor_auto(a__ , return_tensors='''np''' ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1E-2 ) UpperCAmelCase = self._get_dummy_logits() UpperCAmelCase = processor_wavaveca.batch_decode(a__ ) UpperCAmelCase = processor_auto.batch_decode(a__ ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = self.get_feature_extractor() UpperCAmelCase = self.get_tokenizer() UpperCAmelCase = self.get_decoder() UpperCAmelCase = WavaVecaProcessorWithLM(tokenizer=a__ , feature_extractor=a__ , decoder=a__ ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg='''`processor` and `feature_extractor` model input names do not match''' , ) @staticmethod def _lowercase ( _A , _A ): '''simple docstring''' UpperCAmelCase = [d[key] for d in offsets] return retrieved_list def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = self._get_dummy_logits()[0] UpperCAmelCase = processor.decode(a__ , output_word_offsets=a__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(a__ , a__ ) ) self.assertEqual(''' '''.join(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''] , '''end_offset''' ) , [1, 3, 5] ) def _lowercase ( self ): '''simple docstring''' UpperCAmelCase = WavaVecaProcessorWithLM.from_pretrained('''hf-internal-testing/processor_with_lm''' ) UpperCAmelCase = self._get_dummy_logits() UpperCAmelCase = processor.batch_decode(a__ , 
output_word_offsets=a__ ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue('''text''' in outputs ) self.assertTrue('''word_offsets''' in outputs ) self.assertTrue(isinstance(a__ , a__ ) ) self.assertListEqual( [''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) for o in outputs['''word_offsets''']] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''word''' ) , ['''<s>''', '''<s>''', '''</s>'''] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''start_offset''' ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs['''word_offsets'''][0] , '''end_offset''' ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def _lowercase ( self ): '''simple docstring''' import torch UpperCAmelCase = load_dataset('''common_voice''' , '''en''' , split='''train''' , streaming=a__ ) UpperCAmelCase = ds.cast_column('''audio''' , datasets.Audio(sampling_rate=1_6_0_0_0 ) ) UpperCAmelCase = iter(a__ ) UpperCAmelCase = next(a__ ) UpperCAmelCase = AutoProcessor.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) UpperCAmelCase = WavaVecaForCTC.from_pretrained('''patrickvonplaten/wav2vec2-base-100h-with-lm''' ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train UpperCAmelCase = processor(sample['''audio''']['''array'''] , return_tensors='''pt''' ).input_values with torch.no_grad(): UpperCAmelCase = model(a__ ).logits.cpu().numpy() UpperCAmelCase = processor.decode(logits[0] , output_word_offsets=a__ ) UpperCAmelCase = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate UpperCAmelCase = [ { '''start_time''': d['''start_offset'''] * time_offset, '''end_time''': d['''end_offset'''] * time_offset, '''word''': d['''word'''], } for d in output['''word_offsets'''] ] UpperCAmelCase = '''WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL''' # output words self.assertEqual(''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) , a__ ) self.assertEqual(''' '''.join(self.get_from_offsets(a__ , '''word''' ) ) , output.text ) # output times UpperCAmelCase = torch.tensor(self.get_from_offsets(a__ , '''start_time''' ) ) UpperCAmelCase = torch.tensor(self.get_from_offsets(a__ , '''end_time''' ) ) # fmt: off UpperCAmelCase = torch.tensor([1.41_99, 1.65_99, 2.25_99, 3.0, 3.24, 3.59_99, 3.79_99, 4.09_99, 4.26, 4.94, 5.28, 5.65_99, 5.78, 5.94, 6.32, 6.53_99, 6.65_99] ) UpperCAmelCase = torch.tensor([1.53_99, 1.89_99, 2.9, 3.16, 3.53_99, 3.72, 4.01_99, 4.17_99, 4.76, 5.15_99, 5.55_99, 5.69_99, 5.86, 6.19_99, 6.38, 6.61_99, 6.94] ) # fmt: on self.assertTrue(torch.allclose(a__ , a__ , atol=0.01 ) ) self.assertTrue(torch.allclose(a__ , a__ , atol=0.01 ) )
273
'''simple docstring''' import copy from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto.configuration_auto import CONFIG_MAPPING _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) class _snake_case ( lowercase_ ): lowerCAmelCase_ : Any = "upernet" def __init__( self , a__=None , a__=512 , a__=0.0_2 , a__=[1, 2, 3, 6] , a__=True , a__=0.4 , a__=384 , a__=256 , a__=1 , a__=False , a__=255 , **a__ , ) -> Union[str, Any]: '''simple docstring''' super().__init__(**a__ ) if backbone_config is None: logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." ) snake_case_ = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] ) elif isinstance(a__ , a__ ): snake_case_ = backbone_config.get("model_type" ) snake_case_ = CONFIG_MAPPING[backbone_model_type] snake_case_ = config_class.from_dict(a__ ) snake_case_ = backbone_config snake_case_ = hidden_size snake_case_ = initializer_range snake_case_ = pool_scales snake_case_ = use_auxiliary_head snake_case_ = auxiliary_loss_weight snake_case_ = auxiliary_in_channels snake_case_ = auxiliary_channels snake_case_ = auxiliary_num_convs snake_case_ = auxiliary_concat_input snake_case_ = loss_ignore_index def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.backbone_config.to_dict() snake_case_ = self.__class__.model_type return output
85
0
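The word-offset test above converts CTC frame offsets into seconds via `inputs_to_logits_ratio / sampling_rate`. Below is a minimal, self-contained sketch of that conversion; the ratio of 320 is an assumed illustrative value (typical for Wav2Vec2 base models), not taken from the snippet.

def offsets_to_times(word_offsets, inputs_to_logits_ratio=320, sampling_rate=16_000):
    """Convert CTC frame offsets into seconds, mirroring the test above."""
    time_offset = inputs_to_logits_ratio / sampling_rate  # seconds per logit frame
    return [
        {
            "word": d["word"],
            "start_time": round(d["start_offset"] * time_offset, 2),
            "end_time": round(d["end_offset"] * time_offset, 2),
        }
        for d in word_offsets
    ]

if __name__ == "__main__":
    demo = [{"word": "HELLO", "start_offset": 71, "end_offset": 83}]  # made-up offsets
    print(offsets_to_times(demo))  # [{'word': 'HELLO', 'start_time': 1.42, 'end_time': 1.66}]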
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union

import torch
from filelock import FileLock
from torch.utils.data import Dataset

from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features


_UpperCamelCase : Tuple = logging.get_logger(__name__)

_UpperCamelCase : List[str] = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
_UpperCamelCase : Tuple = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)


@dataclass
class snake_case__ :
    a_ = field(
        default=lowercase_ , metadata={"help": "Model type selected in the list: " + ", ".join(lowercase_)})
    a_ = field(
        default=lowercase_ , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."})
    a_ = field(
        default=128 , metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        } , )
    a_ = field(
        default=128 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , )
    a_ = field(
        default=64 , metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        } , )
    a_ = field(
        default=30 , metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        } , )
    a_ = field(
        default=lowercase_ , metadata={"help": "Overwrite the cached training and evaluation sets"})
    a_ = field(
        default=lowercase_ , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."})
    a_ = field(
        default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    a_ = field(
        default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."})
    a_ = field(
        default=0 , metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        } , )
    a_ = field(default=1 , metadata={"help": "multiple threads for converting example to features"})


class snake_case__ ( lowercase_):
    a_ = "train"
    a_ = "dev"


class snake_case__ ( lowercase_):
    a_ = 42
    a_ = 42
    a_ = 42
    a_ = 42

    def __init__( self : int , _A : Union[str, Any] , _A : Any , _A : Optional[Any] = None , _A : Any = Split.train , _A : Optional[int] = False , _A : Optional[Any] = None , _A : Any = "pt" , ) -> Any:
        UpperCAmelCase_ : Optional[int] = args
        UpperCAmelCase_ : Optional[Any] = is_language_sensitive
        UpperCAmelCase_ : Any = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(a__ , a__ ):
            try:
                UpperCAmelCase_ : str = Split[mode]
            except KeyError:
                raise KeyError('''mode is not a valid split name''' )
        UpperCAmelCase_ : Union[str, Any] = mode
        # Load data features from cache or dataset file
        UpperCAmelCase_ : Optional[Any] = '''v2''' if args.version_2_with_negative else '''v1'''
        UpperCAmelCase_ : str = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , F"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}" , )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        UpperCAmelCase_ : Any = cached_features_file + '''.lock'''
        with FileLock(a__ ):
            if os.path.exists(a__ ) and not args.overwrite_cache:
                UpperCAmelCase_ : Dict = time.time()
                UpperCAmelCase_ : Optional[int] = torch.load(a__ )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                UpperCAmelCase_ : int = self.old_features['''features''']
                UpperCAmelCase_ : Tuple = self.old_features.get('''dataset''' , a__ )
                UpperCAmelCase_ : List[Any] = self.old_features.get('''examples''' , a__ )
                logger.info(
                    F"Loading features from cached file {cached_features_file} [took %.3f s]" , time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        F"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        ''' future run''' )
            else:
                if mode == Split.dev:
                    UpperCAmelCase_ : Union[str, Any] = self.processor.get_dev_examples(args.data_dir )
                else:
                    UpperCAmelCase_ : Tuple = self.processor.get_train_examples(args.data_dir )

                UpperCAmelCase_ , UpperCAmelCase_ : Dict = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=a__ , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=a__ , )

                UpperCAmelCase_ : Tuple = time.time()
                torch.save(
                    {'''features''': self.features, '''dataset''': self.dataset, '''examples''': self.examples} , a__ , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    F"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]" )

    def __len__( self : Optional[int] ) -> str:
        return len(self.features )

    def __getitem__( self : Optional[int] , _A : Tuple ) -> Dict[str, torch.Tensor]:
        UpperCAmelCase_ : List[Any] = self.features[i]

        UpperCAmelCase_ : Any = torch.tensor(feature.input_ids , dtype=torch.long )
        UpperCAmelCase_ : List[str] = torch.tensor(feature.attention_mask , dtype=torch.long )
        UpperCAmelCase_ : List[str] = torch.tensor(feature.token_type_ids , dtype=torch.long )
        UpperCAmelCase_ : int = torch.tensor(feature.cls_index , dtype=torch.long )
        UpperCAmelCase_ : List[Any] = torch.tensor(feature.p_mask , dtype=torch.float )
        UpperCAmelCase_ : Dict = torch.tensor(feature.is_impossible , dtype=torch.float )

        UpperCAmelCase_ : str = {
            '''input_ids''': input_ids,
            '''attention_mask''': attention_mask,
            '''token_type_ids''': token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'''cls_index''': cls_index, '''p_mask''': p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({'''is_impossible''': is_impossible} )
            if self.is_language_sensitive:
                inputs.update({'''langs''': (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )

        if self.mode == Split.train:
            UpperCAmelCase_ : List[str] = torch.tensor(feature.start_position , dtype=torch.long )
            UpperCAmelCase_ : Dict = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'''start_positions''': start_positions, '''end_positions''': end_positions} )

        return inputs
304
'''simple docstring'''
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def UpperCamelCase_( snake_case : Any ):
    '''simple docstring'''
    if (
        (cp >= 0X4E00 and cp <= 0X9FFF)
        or (cp >= 0X3400 and cp <= 0X4DBF)  #
        or (cp >= 0X2_0000 and cp <= 0X2_A6DF)  #
        or (cp >= 0X2_A700 and cp <= 0X2_B73F)  #
        or (cp >= 0X2_B740 and cp <= 0X2_B81F)  #
        or (cp >= 0X2_B820 and cp <= 0X2_CEAF)  #
        or (cp >= 0XF900 and cp <= 0XFAFF)
        or (cp >= 0X2_F800 and cp <= 0X2_FA1F)  #
    ):  #
        return True
    return False


def UpperCamelCase_( snake_case : str ):
    '''simple docstring'''
    for char in word:
        snake_case_ = ord(snake_case )
        if not _is_chinese_char(snake_case ):
            return 0
    return 1


def UpperCamelCase_( snake_case : List[str] ):
    '''simple docstring'''
    snake_case_ = set()

    for token in tokens:
        snake_case_ = len(snake_case ) > 1 and is_chinese(snake_case )
        if chinese_word:
            word_set.add(snake_case )
    snake_case_ = list(snake_case )
    return word_list


def UpperCamelCase_( snake_case : List[str] , snake_case : set() ):
    '''simple docstring'''
    if not chinese_word_set:
        return bert_tokens
    snake_case_ = max([len(snake_case ) for w in chinese_word_set] )

    snake_case_ = bert_tokens
    snake_case_ , snake_case_ = 0, len(snake_case )
    while start < end:
        snake_case_ = True
        if is_chinese(bert_word[start] ):
            snake_case_ = min(end - start , snake_case )
            for i in range(snake_case , 1 , -1 ):
                snake_case_ = "".join(bert_word[start : start + i] )
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i ):
                        snake_case_ = "##" + bert_word[j]
                    snake_case_ = start + i
                    snake_case_ = False
                    break
        if single_word:
            start += 1
    return bert_word


def UpperCamelCase_( snake_case : List[str] , snake_case : LTP , snake_case : BertTokenizer ):
    '''simple docstring'''
    snake_case_ = []
    for i in range(0 , len(snake_case ) , 1_0_0 ):
        snake_case_ = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["cws"] ).cws
        snake_case_ = [get_chinese_word(snake_case ) for r in res]
        ltp_res.extend(snake_case )
    assert len(snake_case ) == len(snake_case )

    snake_case_ = []
    for i in range(0 , len(snake_case ) , 1_0_0 ):
        snake_case_ = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=snake_case , truncation=snake_case , max_length=5_1_2 )
        bert_res.extend(res["input_ids"] )
    assert len(snake_case ) == len(snake_case )

    snake_case_ = []
    for input_ids, chinese_word in zip(snake_case , snake_case ):
        snake_case_ = []
        for id in input_ids:
            snake_case_ = bert_tokenizer._convert_id_to_token(snake_case )
            input_tokens.append(snake_case )
        snake_case_ = add_sub_symbol(snake_case , snake_case )
        snake_case_ = []
        # We only save the positions of Chinese subwords that start with ##,
        # which means they are part of a whole word.
        for i, token in enumerate(snake_case ):
            if token[:2] == "##":
                snake_case_ = token[2:]
                # save chinese tokens' pos
                if len(snake_case ) == 1 and _is_chinese_char(ord(snake_case ) ):
                    ref_id.append(snake_case )
        ref_ids.append(snake_case )

    assert len(snake_case ) == len(snake_case )
    return ref_ids


def UpperCamelCase_( snake_case : Any ):
    '''simple docstring'''
    with open(args.file_name , "r" , encoding="utf-8" ) as f:
        snake_case_ = f.readlines()
    snake_case_ = [line.strip() for line in data if len(snake_case ) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    snake_case_ = LTP(args.ltp )  # faster in GPU device
    snake_case_ = BertTokenizer.from_pretrained(args.bert )
    snake_case_ = prepare_ref(snake_case , snake_case , snake_case )
    with open(args.save_path , "w" , encoding="utf-8" ) as f:
        snake_case_ = [json.dumps(snake_case ) + "\n" for ref in ref_ids]
        f.writelines(snake_case )


if __name__ == "__main__":
    _SCREAMING_SNAKE_CASE : List[str] = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )
    _SCREAMING_SNAKE_CASE : Union[str, Any] = parser.parse_args()
    main(args)
85
0
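The add_sub_symbol helper above marks the non-initial characters of each matched Chinese word with a "##" prefix using a longest-match-first search. Below is a small, self-contained illustration of that marking logic, with a toy word set standing in for LTP segmentation output; unlike the original it omits the per-character is_chinese guard, so it is a simplification rather than a drop-in replacement.

def mark_subwords(tokens, word_set):
    """Prefix non-initial characters of the longest matching word with '##'."""
    max_len = max(len(w) for w in word_set)
    out, start, end = list(tokens), 0, len(tokens)
    while start < end:
        single = True
        # try the longest candidate word first, down to length 2
        for i in range(min(end - start, max_len), 1, -1):
            if "".join(out[start:start + i]) in word_set:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]
                start += i
                single = False
                break
        if single:
            start += 1
    return out

print(mark_subwords(list("中文分词"), {"中文", "分词"}))  # ['中', '##文', '分', '##词']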
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowercase__ = logging.get_logger(__name__) lowercase__ = {"vocab_file": "sentencepiece.model"} lowercase__ = { "vocab_file": { "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model", }, } lowercase__ = { "google/rembert": 256, } class __snake_case ( lowercase_ ): a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self , lowercase , lowercase=False , lowercase=True , lowercase=True , lowercase="[CLS]" , lowercase="[SEP]" , lowercase="[UNK]" , lowercase="[SEP]" , lowercase="[PAD]" , lowercase="[CLS]" , lowercase="[MASK]" , **lowercase , ) -> Tuple: '''simple docstring''' super().__init__( do_lower_case=a__ , remove_space=a__ , keep_accents=a__ , bos_token=a__ , eos_token=a__ , unk_token=a__ , sep_token=a__ , pad_token=a__ , cls_token=a__ , mask_token=a__ , **a__ , ) a__: Tuple = do_lower_case a__: int = remove_space a__: int = keep_accents a__: Union[str, Any] = vocab_file a__: List[Any] = spm.SentencePieceProcessor() self.sp_model.Load(a__) @property def lowerCamelCase_ ( self) -> Optional[int]: '''simple docstring''' return len(self.sp_model) def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: Dict = {self.convert_ids_to_tokens(a__): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self) -> Optional[int]: '''simple docstring''' a__: Any = self.__dict__.copy() a__: Dict = None return state def __setstate__( self , lowercase) -> Optional[Any]: '''simple docstring''' a__: List[str] = d a__: int = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def lowerCamelCase_ ( self , lowercase , lowercase=False) -> int: '''simple docstring''' a__: str = self.sp_model.EncodeAsPieces(a__) return pieces def lowerCamelCase_ ( self , lowercase) -> Tuple: '''simple docstring''' return self.sp_model.PieceToId(a__) def lowerCamelCase_ ( self , lowercase) -> List[Any]: '''simple docstring''' return self.sp_model.IdToPiece(a__) def lowerCamelCase_ ( self , lowercase) -> List[str]: '''simple docstring''' a__: List[Any] = self.sp_model.decode_pieces(a__) return out_string def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]: '''simple docstring''' a__: Union[str, Any] = [self.sep_token_id] a__: Optional[Any] = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase_ ( self , lowercase , lowercase = None , lowercase = False) -> List[int]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.') return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(a__)) + [1] + ([0] * len(a__)) + [1] return [1] + ([0] * len(a__)) + [1] def lowerCamelCase_ ( self , lowercase , lowercase = None) -> List[int]: '''simple docstring''' a__: Union[str, Any] = [self.sep_token_id] a__: int = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep) * [0] return len(cls + token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def lowerCamelCase_ ( self , 
lowercase , lowercase = None) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(a__): logger.error('Vocabulary path ({}) should be a directory'.format(a__)) return a__: Optional[int] = os.path.join( a__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']) if os.path.abspath(self.vocab_file) != os.path.abspath(a__): copyfile(self.vocab_file , a__) return (out_vocab_file,)
290
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable


def UpperCamelCase_( snake_case : Callable ):
    '''simple docstring'''

    @wraps(snake_case )
    def _inner_fn(*snake_case : Optional[int] , **snake_case : List[Any] ):
        warnings.warn(
            (f'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , snake_case , )
        return fn(*snake_case , **snake_case )

    return _inner_fn
85
0
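The decorator above (with obfuscated names) simply emits a warning and delegates to the wrapped function. A de-obfuscated sketch of the same pattern follows; UserWarning is an assumed warning category, chosen here for illustration.

import warnings
from functools import wraps

def experimental(fn):
    """Warn on every call, then delegate to the wrapped function."""
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)
    return _inner_fn

@experimental
def new_feature(x):
    return x * 2

print(new_feature(21))  # emits a UserWarning, then prints 42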
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


lowercase : List[Any] = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"


class A__ ( unittest.TestCase , lowercase_ ):
    """simple docstring"""

    def __lowercase ( self) -> Union[str, Any]:
        '''simple docstring'''
        a__ : Any = load_tool('text-question-answering')
        self.tool.setup()
        a__ : Any = load_tool('text-question-answering' , remote=a__)

    def __lowercase ( self) -> int:
        '''simple docstring'''
        a__ : str = self.tool(a__ , 'What did Hugging Face do in April 2021?')
        self.assertEqual(a__ , 'launched the BigScience Research Workshop')

    def __lowercase ( self) -> List[Any]:
        '''simple docstring'''
        a__ : List[str] = self.remote_tool(a__ , 'What did Hugging Face do in April 2021?')
        self.assertEqual(a__ , 'launched the BigScience Research Workshop')

    def __lowercase ( self) -> str:
        '''simple docstring'''
        a__ : Dict = self.tool(text=a__ , question='What did Hugging Face do in April 2021?')
        self.assertEqual(a__ , 'launched the BigScience Research Workshop')

    def __lowercase ( self) -> List[str]:
        '''simple docstring'''
        a__ : str = self.remote_tool(text=a__ , question='What did Hugging Face do in April 2021?')
        self.assertEqual(a__ , 'launched the BigScience Research Workshop')
99
'''simple docstring'''
from __future__ import annotations

import requests


def UpperCamelCase_( snake_case : str ):
    '''simple docstring'''
    snake_case_ = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty'
    return requests.get(snake_case ).json()


def UpperCamelCase_( snake_case : int = 1_0 ):
    '''simple docstring'''
    snake_case_ = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    snake_case_ = requests.get(snake_case ).json()[:max_stories]
    return [get_hackernews_story(snake_case ) for story_id in story_ids]


def UpperCamelCase_( snake_case : int = 1_0 ):
    '''simple docstring'''
    snake_case_ = hackernews_top_stories(snake_case )
    return "\n".join("* [{title}]({url})".format(**snake_case ) for story in stories )


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
85
0
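The Hacker News helper above renders stories as a markdown bullet list. Here is an offline sketch of just the rendering step, with hand-written story dicts standing in for the Firebase API response, so it runs without network access.

stories = [
    {"title": "Show HN: My project", "url": "https://example.com"},
    {"title": "Ask HN: Favorite editors?", "url": "https://news.ycombinator.com/item?id=1"},
]
# Same format string as the function above: one markdown bullet per story.
markdown = "\n".join("* [{title}]({url})".format(**story) for story in stories)
print(markdown)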
from __future__ import annotations

import os
import tempfile
import unittest

import numpy as np
from huggingface_hub import hf_hub_download

from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow

from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin


if is_tf_available():
    import tensorflow as tf

    from transformers import (
        AutoTokenizer,
        TFAutoModelForCausalLM,
        TFAutoModelForSeqaSeqLM,
        TFAutoModelForSpeechSeqaSeq,
        TFAutoModelForVisionaSeq,
        TFBartForConditionalGeneration,
        TFLogitsProcessorList,
        TFMinLengthLogitsProcessor,
        tf_top_k_top_p_filtering,
    )

if is_tensorflow_text_available():
    import tensorflow_text as text


@require_tf
class _lowerCamelCase ( unittest.TestCase ):
    """simple docstring"""

    def _snake_case ( self )->Union[str, Any]:
        '''simple docstring'''
        A_ : Optional[Any] = tf.convert_to_tensor(
            [
                [
                    8.2_2_2_0_9_9_1,  # 3rd highest value; idx. 0
                    -0.5_6_2_0_0_4_4,
                    5.2_3_2_2_9_7_5_2,
                    4.0_3_8_6_3_9_3,
                    -6.8_7_9_8_3_7_8,
                    -0.5_4_7_8_5_8_0_2,
                    -3.2_0_1_2_1_5_3,
                    2.9_2_7_7_7_1_7_6,
                    1.8_8_1_7_1_9_5_3,
                    7.3_5_3_4_1_2_7_6,  # 5th highest value; idx. 9
                    8.4_3_2_0_7_8_3_3,  # 2nd highest value; idx. 10
                    -9.8_5_7_1_1_8_3_6,
                    -5.9_6_2_0_9_2_3_6,
                    -1.1_3_0_3_9_1_6_1,
                    -7.1_1_1_5_2_9_4,
                    -0.8_3_6_9_6_3_3,
                    -5.3_1_8_6_4_0_8,
                    7.0_6_4_2_7_4_0_7,
                    0.8_1_3_6_9_3_4_4,
                    -0.8_2_0_2_3_8_1_7,
                    -5.9_1_7_9_7_9_6,
                    0.5_8_8_1_3_4_4_3,
                    -6.9_9_7_7_8_4_3_8,
                    4.7_1_5_5_1_1_8_9,
                    -0.1_8_7_7_1_6_3_7,
                    7.4_4_0_2_0_7_5_9,  # 4th highest value; idx. 25
                    9.3_8_4_5_0_9_8_7,  # 1st highest value; idx. 26
                    2.1_2_6_6_2_9_4_1,
                    -9.3_2_5_6_2_0_3_8,
                    2.3_5_6_5_2_5_2_2,
                ],  # cumulative prob of 5 highest values <= 0.6
                [
                    0.5_8_4_2_5_5_1_8,
                    4.5_3_1_3_9_2_3_8,
                    -5.5_7_5_1_0_4_6_4,
                    -6.2_8_0_3_0_6_9_9,
                    -7.1_9_5_2_9_5_0_3,
                    -4.0_2_1_2_2_5_5_1,
                    1.3_9_3_3_7_0_3_7,
                    -6.0_6_7_0_7_0_5_7,
                    1.5_9_4_8_0_5_1_7,
                    -9.6_4_3_1_1_9,
                    0.0_3_9_0_7_7_9_9,
                    0.6_7_2_3_1_7_6_2,
                    -8.8_8_2_0_6_7_2_6,
                    6.2_7_1_1_5_9_2_2,  # 4th highest value; idx. 13
                    2.2_8_5_2_0_7_2_3,
                    4.8_2_7_6_7_5_0_6,
                    4.3_0_4_2_1_3_6_8,
                    8.8_2_7_5_3_1_3,  # 2nd highest value; idx. 17
                    5.4_4_0_2_9_9_5_8,  # 5th highest value; idx. 18
                    -4.4_7_3_5_7_9_4,
                    7.3_8_5_7_9_5_3_6,  # 3rd highest value; idx. 20
                    -2.9_1_0_5_1_6_6_3,
                    2.6_1_9_4_6_0_7_7,
                    -2.5_6_7_4_7_6_2,
                    -9.4_8_9_5_9_3_0_2,
                    -4.0_2_9_2_2_6_4_5,
                    -1.3_5_4_1_6_9_1_8,
                    9.6_7_7_0_2_3_2_3,  # 1st highest value; idx. 27
                    -5.8_9_4_7_8_5_5_3,
                    1.8_5_3_7_0_4_6_7,
                ],  # cumulative prob of 5 highest values <= 0.6
            ] , dtype=tf.floataa , )

        A_ : Optional[int] = tf.convert_to_tensor(
            [[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]] , dtype=tf.intaa , )  # expected non filtered idx as noted above

        A_ : List[Any] = tf.convert_to_tensor(
            [8.2_2_2_0_9_9, 7.3_5_3_4_1_2_6, 8.4_3_2_0_7_8, 7.4_4_0_2_0_7_5, 9.3_8_4_5_1, 6.2_7_1_1_5_9, 8.8_2_7_5_3_1, 5.4_4_0_2_9_9_5, 7.3_8_5_7_9_5_6, 9.6_7_7_0_2_3] , dtype=tf.floataa , )  # expected non filtered values as noted above

        A_ : Any = tf_top_k_top_p_filtering(a__ , top_k=10 , top_p=0.6 , min_tokens_to_keep=4 )

        A_ : int = output[output != -float('''inf''' )]
        A_ : str = tf.cast(
            tf.where(tf.not_equal(a__ , tf.constant(-float('''inf''' ) , dtype=tf.floataa ) ) ) , dtype=tf.intaa , )

        tf.debugging.assert_near(a__ , a__ , rtol=1e-12 )
        tf.debugging.assert_equal(a__ , a__ )


@require_tf
class _lowerCamelCase ( unittest.TestCase , lowercase_ ):
    """simple docstring"""

    # setting framework_dependent_parameters needs to be gated, just like its contents' imports
    if is_tf_available():
        snake_case = {
            "AutoModelForCausalLM": TFAutoModelForCausalLM,
            "AutoModelForSpeechSeq2Seq": TFAutoModelForSpeechSeqaSeq,
            "AutoModelForSeq2SeqLM": TFAutoModelForSeqaSeqLM,
            "AutoModelForVision2Seq": TFAutoModelForVisionaSeq,
            "LogitsProcessorList": TFLogitsProcessorList,
            "MinLengthLogitsProcessor": TFMinLengthLogitsProcessor,
            "create_tensor_fn": tf.convert_to_tensor,
            "floats_tensor": floats_tensor,
            "return_tensors": "tf",
        }

    @slow
    def _snake_case ( self )->Any:
        '''simple docstring'''
        A_ : Optional[Any] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        A_ : List[str] = 2
        A_ : Optional[int] = 2

        class _lowerCamelCase ( tf.Module ):
            """simple docstring"""

            def __init__( self , _SCREAMING_SNAKE_CASE )->List[str]:
                '''simple docstring'''
                super(a__ , self ).__init__()
                A_ : Optional[int] = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((None, input_length) , tf.intaa , name='''input_ids''' ),
                    tf.TensorSpec((None, input_length) , tf.intaa , name='''attention_mask''' ),
                ) , jit_compile=a__ , )
            def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->int:
                '''simple docstring'''
                A_ : Tuple = self.model.generate(
                    input_ids=a__ , attention_mask=a__ , max_new_tokens=a__ , return_dict_in_generate=a__ , )
                return {"sequences": outputs["sequences"]}

        A_ : str = [[2, 0], [102, 103]]
        A_ : List[str] = [[1, 0], [1, 1]]
        A_ : Optional[Any] = DummyModel(model=a__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(a__ , a__ , signatures={'''serving_default''': dummy_model.serving} )
            A_ : Union[str, Any] = tf.saved_model.load(a__ ).signatures['''serving_default''']
            for batch_size in range(1 , len(a__ ) + 1 ):
                A_ : str = {
                    '''input_ids''': tf.constant(dummy_input_ids[:batch_size] ),
                    '''attention_mask''': tf.constant(dummy_attention_masks[:batch_size] ),
                }
                A_ : Dict = serving_func(**a__ )['''sequences''']
                A_ : Tuple = test_model.generate(**a__ , max_new_tokens=a__ )
                tf.debugging.assert_equal(a__ , a__ )

    @slow
    def _snake_case ( self )->Optional[Any]:
        '''simple docstring'''
        A_ : List[str] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        A_ : str = 1
        A_ : Any = 2

        class _lowerCamelCase ( tf.Module ):
            """simple docstring"""

            def __init__( self , _SCREAMING_SNAKE_CASE )->Dict:
                '''simple docstring'''
                super(a__ , self ).__init__()
                A_ : Union[str, Any] = model

            @tf.function(
                input_signature=(
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='''input_ids''' ),
                    tf.TensorSpec((batch_size, None) , tf.intaa , name='''attention_mask''' ),
                ) , jit_compile=a__ , )
            def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->int:
                '''simple docstring'''
                A_ : Optional[Any] = self.model.generate(
                    input_ids=a__ , attention_mask=a__ , max_new_tokens=a__ , return_dict_in_generate=a__ , )
                return {"sequences": outputs["sequences"]}

        A_ : int = [[2], [102, 103]]
        A_ : Optional[Any] = [[1], [1, 1]]
        A_ : List[Any] = DummyModel(model=a__ )
        with tempfile.TemporaryDirectory() as tmp_dir:
            tf.saved_model.save(a__ , a__ , signatures={'''serving_default''': dummy_model.serving} )
            A_ : Union[str, Any] = tf.saved_model.load(a__ ).signatures['''serving_default''']
            for input_row in range(len(a__ ) ):
                A_ : str = {
                    '''input_ids''': tf.constant([dummy_input_ids[input_row]] ),
                    '''attention_mask''': tf.constant([dummy_attention_masks[input_row]] ),
                }
                A_ : List[Any] = serving_func(**a__ )['''sequences''']
                A_ : Any = test_model.generate(**a__ , max_new_tokens=a__ )
                tf.debugging.assert_equal(a__ , a__ )

    @slow
    @require_tensorflow_text
    def _snake_case ( self )->Tuple:
        '''simple docstring'''
        with tempfile.TemporaryDirectory() as tmp_dir:
            # file needed to load the TF tokenizer
            hf_hub_download(repo_id='''google/flan-t5-small''' , filename='''spiece.model''' , local_dir=a__ )

            class _lowerCamelCase ( tf.keras.layers.Layer ):
                """simple docstring"""

                def __init__( self )->str:
                    '''simple docstring'''
                    super().__init__()
                    A_ : List[str] = text.SentencepieceTokenizer(
                        model=tf.io.gfile.GFile(os.path.join(a__ , '''spiece.model''' ) , '''rb''' ).read() )
                    A_ : str = TFAutoModelForSeqaSeqLM.from_pretrained('''hf-internal-testing/tiny-random-t5''' )

                def _snake_case ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->List[str]:
                    '''simple docstring'''
                    A_ : Optional[int] = self.tokenizer.tokenize(a__ )
                    A_ , A_ : Any = text.pad_model_inputs(
                        a__ , max_seq_length=64 , pad_value=self.model.config.pad_token_id )
                    A_ : List[Any] = self.model.generate(input_ids=a__ , attention_mask=a__ )
                    return self.tokenizer.detokenize(a__ )

            A_ : int = CompleteSentenceTransformer()
            A_ : List[str] = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name='''inputs''' )
            A_ : Dict = complete_model(a__ )
            A_ : Optional[Any] = tf.keras.Model(a__ , a__ )
            keras_model.save(a__ )

    def _snake_case ( self )->Tuple:
        '''simple docstring'''
        A_ : Optional[Any] = {
            '''do_sample''': True,
            '''num_beams''': 1,
            '''top_p''': 0.7,
            '''top_k''': 10,
            '''temperature''': 0.7,
        }
        A_ : int = 14
        A_ : int = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        A_ : Optional[int] = '''Hello, my dog is cute and'''
        A_ : int = tokenizer(a__ , return_tensors='''tf''' )
        A_ : Optional[int] = TFAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
        A_ : Optional[Any] = 638
        # forces the generation to happen on CPU, to avoid GPU-related quirks
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            A_ : Tuple = model.generate(**a__ , eos_token_id=a__ , **a__ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

        A_ : Optional[Any] = [638, 198]
        with tf.device(''':/CPU:0''' ):
            tf.random.set_seed(0 )
            A_ : Union[str, Any] = model.generate(**a__ , eos_token_id=a__ , **a__ )
        self.assertTrue(expectation == len(generated_tokens[0] ) )

    def _snake_case ( self )->Optional[Any]:
        '''simple docstring'''
        A_ : Dict = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        A_ : Union[str, Any] = '''Hugging Face is a technology company based in New York and Paris.'''
        A_ : List[Any] = bart_tokenizer(a__ , return_tensors='''tf''' ).input_ids
        A_ : Any = TFBartForConditionalGeneration.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        A_ : Optional[Any] = bart_model.generate(a__ ).numpy()

        class _lowerCamelCase ( lowercase_ ):
            """simple docstring"""

            def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE )->Dict:
                '''simple docstring'''
                return super().call(a__ , **a__ )

        A_ : Optional[Any] = FakeBart.from_pretrained('''hf-internal-testing/tiny-random-bart''' )
        A_ : str = bart_model.generate(a__ , foo='''bar''' ).numpy()
        self.assertTrue(np.array_equal(a__ , a__ ) )

        class _lowerCamelCase ( bart_model.model.encoder.__class__ ):
            """simple docstring"""

            def _snake_case ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )->Union[str, Any]:
                '''simple docstring'''
                return super().call(a__ , **a__ )

        A_ : Optional[Any] = FakeEncoder(bart_model.config , bart_model.model.shared )
        A_ : Optional[int] = fake_encoder

        # Normal generation still works (the output will be different because the encoder weights are different)
        A_ : Dict = bart_model.generate(a__ ).numpy()
        with self.assertRaises(a__ ):
            # FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
            bart_model.generate(a__ , foo='''bar''' )
186
'''simple docstring'''
import unittest

from transformers import (
    MODEL_FOR_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_CAUSAL_LM_MAPPING,
    TextGenerationPipeline,
    logging,
    pipeline,
)
from transformers.testing_utils import (
    CaptureLogger,
    is_pipeline_test,
    require_accelerate,
    require_tf,
    require_torch,
    require_torch_gpu,
    require_torch_or_tf,
)

from .test_pipelines_common import ANY


@is_pipeline_test
@require_torch_or_tf
class _snake_case ( unittest.TestCase ):
    lowerCAmelCase_ : Optional[Any] = MODEL_FOR_CAUSAL_LM_MAPPING
    lowerCAmelCase_ : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING

    @require_torch
    def lowerCAmelCase__ ( self ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" )
        # Using `do_sample=False` to force deterministic output
        snake_case_ = text_generator("This is a test" , do_sample=a__ )
        self.assertEqual(
            a__ , [
                {
                    "generated_text": (
                        "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                        " oscope. FiliFili@@"
                    )
                }
            ] , )

        snake_case_ = text_generator(["This is a test", "This is a second test"] )
        self.assertEqual(
            a__ , [
                [
                    {
                        "generated_text": (
                            "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope."
                            " oscope. FiliFili@@"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy"
                            " oscope. oscope. FiliFili@@"
                        )
                    }
                ],
            ] , )

        snake_case_ = text_generator("This is a test" , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ )
        self.assertEqual(
            a__ , [
                {"generated_token_ids": ANY(a__ )},
                {"generated_token_ids": ANY(a__ )},
            ] , )

        snake_case_ = text_generator.model.config.eos_token_id
        snake_case_ = "<pad>"
        snake_case_ = text_generator(
            ["This is a test", "This is a second test"] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , )
        self.assertEqual(
            a__ , [
                [
                    {"generated_token_ids": ANY(a__ )},
                    {"generated_token_ids": ANY(a__ )},
                ],
                [
                    {"generated_token_ids": ANY(a__ )},
                    {"generated_token_ids": ANY(a__ )},
                ],
            ] , )

    @require_tf
    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" )

        # Using `do_sample=False` to force deterministic output
        snake_case_ = text_generator("This is a test" , do_sample=a__ )
        self.assertEqual(
            a__ , [
                {
                    "generated_text": (
                        "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                        " please,"
                    )
                }
            ] , )

        snake_case_ = text_generator(["This is a test", "This is a second test"] , do_sample=a__ )
        self.assertEqual(
            a__ , [
                [
                    {
                        "generated_text": (
                            "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵"
                            " please,"
                        )
                    }
                ],
                [
                    {
                        "generated_text": (
                            "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes"
                            " Cannes 閲閲Cannes Cannes Cannes 攵 please,"
                        )
                    }
                ],
            ] , )

    def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> str:
        '''simple docstring'''
        snake_case_ = TextGenerationPipeline(model=a__ , tokenizer=a__ )
        return text_generator, ["This is a test", "Another test"]

    def lowerCAmelCase__ ( self ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ = "Hello I believe in"
        snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
        snake_case_ = text_generator(a__ )
        self.assertEqual(
            a__ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , )

        snake_case_ = text_generator(a__ , stop_sequence=" fe" )
        self.assertEqual(a__ , [{"generated_text": "Hello I believe in fe"}] )

    def lowerCAmelCase__ ( self , a__ , a__ ) -> Tuple:
        '''simple docstring'''
        snake_case_ = text_generator.model
        snake_case_ = text_generator.tokenizer

        snake_case_ = text_generator("This is a test" )
        self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )

        snake_case_ = text_generator("This is a test" , return_full_text=a__ )
        self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
        self.assertNotIn("This is a test" , outputs[0]["generated_text"] )

        snake_case_ = pipeline(task="text-generation" , model=a__ , tokenizer=a__ , return_full_text=a__ )
        snake_case_ = text_generator("This is a test" )
        self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
        self.assertNotIn("This is a test" , outputs[0]["generated_text"] )

        snake_case_ = text_generator("This is a test" , return_full_text=a__ )
        self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
        self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) )

        snake_case_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a__ )
        self.assertEqual(
            a__ , [
                [{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
                [{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
            ] , )

        if text_generator.tokenizer.pad_token is not None:
            snake_case_ = text_generator(
                ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a__ )
            self.assertEqual(
                a__ , [
                    [{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
                    [{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}],
                ] , )

        with self.assertRaises(a__ ):
            snake_case_ = text_generator("test" , return_full_text=a__ , return_text=a__ )
        with self.assertRaises(a__ ):
            snake_case_ = text_generator("test" , return_full_text=a__ , return_tensors=a__ )
        with self.assertRaises(a__ ):
            snake_case_ = text_generator("test" , return_text=a__ , return_tensors=a__ )

        # Empty prompt is slightly special
        # it requires BOS token to exist.
        # Special case for Pegasus which will always append EOS so will
        # work even without BOS.
        if (
            text_generator.tokenizer.bos_token_id is not None
            or "Pegasus" in tokenizer.__class__.__name__
            or "Git" in model.__class__.__name__
        ):
            snake_case_ = text_generator("" )
            self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] )
        else:
            with self.assertRaises((ValueError, AssertionError) ):
                snake_case_ = text_generator("" )

        if text_generator.framework == "tf":
            # TF generation does not support max_new_tokens, and it's impossible
            # to control long generation with only max_length without
            # fancy calculation, dismissing tests for now.
            return

        # We don't care about infinite range models.
        # They already work.
        # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly.
        snake_case_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"]
        if (
            tokenizer.model_max_length < 10_000
            and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS
        ):
            # Handling of large generations
            with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ):
                text_generator("This is a test" * 500 , max_new_tokens=20 )

            snake_case_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 )
            # Hole strategy cannot work
            with self.assertRaises(a__ ):
                text_generator(
                    "This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def lowerCAmelCase__ ( self ) -> Optional[Any]:
        '''simple docstring'''
        import torch

        # Classic `model_kwargs`
        snake_case_ = pipeline(
            model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        snake_case_ = pipe("This is a test" )
        self.assertEqual(
            a__ , [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ] , )

        # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.)
        snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa )
        snake_case_ = pipe("This is a test" )
        self.assertEqual(
            a__ , [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ] , )

        # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602
        snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" )
        self.assertEqual(pipe.model.device , torch.device(0 ) )
        self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa )
        snake_case_ = pipe("This is a test" )
        self.assertEqual(
            a__ , [
                {
                    "generated_text": (
                        "This is a test test test test test test test test test test test test test test test test"
                        " test"
                    )
                }
            ] , )

    @require_torch
    @require_torch_gpu
    def lowerCAmelCase__ ( self ) -> Union[str, Any]:
        '''simple docstring'''
        import torch

        snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa )
        pipe("This is a test" )

    @require_torch
    @require_accelerate
    @require_torch_gpu
    def lowerCAmelCase__ ( self ) -> Dict:
        '''simple docstring'''
        import torch

        snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa )
        pipe("This is a test" , do_sample=a__ , top_p=0.5 )

    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        snake_case_ = "Hello world"
        snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" )
        if text_generator.model.framework == "tf":
            snake_case_ = logging.get_logger("transformers.generation.tf_utils" )
        else:
            snake_case_ = logging.get_logger("transformers.generation.utils" )
        snake_case_ = "Both `max_new_tokens`"  # The beginning of the message to be checked in this test

        # Both are set by the user -> log warning
        with CaptureLogger(a__ ) as cl:
            snake_case_ = text_generator(a__ , max_length=10 , max_new_tokens=1 )
        self.assertIn(a__ , cl.out )

        # The user only sets one -> no warning
        with CaptureLogger(a__ ) as cl:
            snake_case_ = text_generator(a__ , max_new_tokens=1 )
        self.assertNotIn(a__ , cl.out )

        with CaptureLogger(a__ ) as cl:
            snake_case_ = text_generator(a__ , max_length=10 )
        self.assertNotIn(a__ , cl.out )
85
0
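The first TF test in this row exercises combined top-k and nucleus (top-p) logit filtering. Below is a NumPy sketch of the same semantics for a single unbatched logit vector; it ignores min_tokens_to_keep and batching, so it is an illustration of the technique, not the library implementation.

import numpy as np

def top_k_top_p_filter(logits, top_k=0, top_p=1.0):
    """Set logits outside the top-k set or the top-p nucleus to -inf."""
    filtered = logits.astype(float).copy()
    if top_k > 0:
        kth_value = np.sort(filtered)[-top_k]  # smallest logit that survives top-k
        filtered[filtered < kth_value] = -np.inf
    if top_p < 1.0:
        order = np.argsort(filtered)[::-1]  # token ids, most likely first
        shifted = filtered[order] - filtered[order].max()
        probs = np.exp(shifted) / np.exp(shifted).sum()
        cumulative = np.cumsum(probs)
        # tokens from the first one that crosses top_p onward are removal
        # candidates; keep that crossing token so kept mass is >= top_p
        to_remove = order[cumulative > top_p][1:]
        filtered[to_remove] = -np.inf
    return filtered

print(top_k_top_p_filter(np.array([1.0, 4.0, 2.0, 3.0]), top_k=3, top_p=0.9))
# -> [-inf, 4., -inf, 3.]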
'''simple docstring'''
from unittest.mock import Mock, patch

from file_transfer.send_file import send_file


@patch("socket.socket" )
@patch("builtins.open" )
def _UpperCamelCase ( __A , __A ) -> int:
    '''simple docstring'''
    UpperCamelCase__ = Mock()
    UpperCamelCase__ = conn, Mock()
    UpperCamelCase__ = iter([1, None] )
    UpperCamelCase__ = lambda __A : next(__A )

    # ===== invoke =====
    send_file(filename="mytext.txt" , testing=__A )

    # ===== verification =====
    sock.assert_called_once()
    sock.return_value.bind.assert_called_once()
    sock.return_value.listen.assert_called_once()
    sock.return_value.accept.assert_called_once()
    conn.recv.assert_called_once()

    file.return_value.__enter__.assert_called_once()
    file.return_value.__enter__.return_value.read.assert_called()

    conn.send.assert_called_once()
    conn.close.assert_called_once()

    sock.return_value.shutdown.assert_called_once()
    sock.return_value.close.assert_called_once()
80
'''simple docstring'''
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class _snake_case ( unittest.TestCase , lowercase_ ):
    def lowerCAmelCase__ ( self ) -> str:
        '''simple docstring'''
        snake_case_ = load_tool("text-classification" )
        self.tool.setup()
        snake_case_ = load_tool("text-classification" , remote=a__ )

    def lowerCAmelCase__ ( self ) -> Tuple:
        '''simple docstring'''
        snake_case_ = self.tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(a__ , "positive" )

    def lowerCAmelCase__ ( self ) -> List[Any]:
        '''simple docstring'''
        snake_case_ = self.remote_tool("That's quite cool" , ["positive", "negative"] )
        self.assertEqual(a__ , "positive" )

    def lowerCAmelCase__ ( self ) -> int:
        '''simple docstring'''
        snake_case_ = self.tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(a__ , "positive" )

    def lowerCAmelCase__ ( self ) -> Optional[Any]:
        '''simple docstring'''
        snake_case_ = self.remote_tool(text="That's quite cool" , labels=["positive", "negative"] )
        self.assertEqual(a__ , "positive" )
85
0
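The mocked test earlier in this row asserts a specific socket/file call sequence without showing the server itself. Below is one plausible send_file implementation consistent with those assertions; the port and chunk size are assumptions, and the testing flag is accepted but unused in this sketch.

import socket

def send_file(filename: str = "mytext.txt", testing: bool = False) -> None:
    """Serve one file to one client: bind/listen/accept, one recv, chunked sends."""
    port = 12_312  # arbitrary port, chosen for the sketch
    sock = socket.socket()
    host = socket.gethostname()
    sock.bind((host, port))
    sock.listen(5)
    conn, _addr = sock.accept()
    conn.recv(1_024)  # wait for the client to announce itself
    with open(filename, "rb") as in_file:
        data = in_file.read(1_024)
        while data:
            conn.send(data)
            data = in_file.read(1_024)
    conn.close()
    sock.shutdown(1)
    sock.close()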
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch

import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest

from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError

from .utils import require_pil


class _snake_case ( lowercase_ ):
    '''simple docstring'''

    def A__ ( self: Tuple ) -> Optional[Any]:
        UpperCAmelCase_ : List[Any] = pa.array(TypedSequence([1, 2, 3] ) )
        self.assertEqual(arr.type ,pa.intaa() )

    def A__ ( self: Dict ) -> int:
        with self.assertRaises(a__ ):
            UpperCAmelCase_ : Any = pa.array(TypedSequence([1, 2, 3] ) ,type=pa.intaa() )

    def A__ ( self: Optional[int] ) -> str:
        with self.assertRaises(a__ ):
            UpperCAmelCase_ : str = pa.array(TypedSequence([1, 2, 3] ,try_type=Value("""bool""" ) ,type=Value("""int64""" ) ) )

    def A__ ( self: Union[str, Any] ) -> Optional[Any]:
        UpperCAmelCase_ : Tuple = pa.array(TypedSequence([1, 2, 3] ,type=Value("""int32""" ) ) )
        self.assertEqual(arr.type ,pa.intaa() )

    def A__ ( self: str ) -> List[str]:
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            UpperCAmelCase_ : Union[str, Any] = pa.array(TypedSequence(["""foo""", """bar"""] ,type=Value("""int64""" ) ) )

    def A__ ( self: int ) -> List[str]:
        UpperCAmelCase_ : Optional[Any] = pa.array(TypedSequence([1, 2, 3] ,try_type=Value("""int32""" ) ) )
        self.assertEqual(arr.type ,pa.intaa() )

    def A__ ( self: Optional[Any] ) -> List[Any]:
        UpperCAmelCase_ : Tuple = pa.array(TypedSequence(["""foo""", """bar"""] ,try_type=Value("""int64""" ) ) )
        self.assertEqual(arr.type ,pa.string() )

    def A__ ( self: Tuple ) -> str:
        UpperCAmelCase_ : str = pa.array(TypedSequence([[[1, 2, 3]]] ,type=ArrayaD((1, 3) ,"""int64""" ) ) )
        self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,"""int64""" ) )

    def A__ ( self: int ) -> List[str]:
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
            UpperCAmelCase_ : List[Any] = pa.array(TypedSequence(["""foo""", """bar"""] ,type=ArrayaD((1, 3) ,"""int64""" ) ) )

    def A__ ( self: Any ) -> str:
        UpperCAmelCase_ : Dict = pa.array(TypedSequence([[[1, 2, 3]]] ,try_type=ArrayaD((1, 3) ,"""int64""" ) ) )
        self.assertEqual(arr.type ,ArrayaDExtensionType((1, 3) ,"""int64""" ) )

    def A__ ( self: Dict ) -> List[Any]:
        UpperCAmelCase_ : Optional[Any] = pa.array(TypedSequence(["""foo""", """bar"""] ,try_type=ArrayaD((1, 3) ,"""int64""" ) ) )
        self.assertEqual(arr.type ,pa.string() )

    @require_pil
    def A__ ( self: List[str] ) -> Dict:
        import PIL.Image

        UpperCAmelCase_ : Optional[Any] = PIL.Image.fromarray(np.arange(10 ,dtype=np.uinta ).reshape(2 ,5 ) )
        with patch(
            """datasets.arrow_writer.cast_to_python_objects""" ,side_effect=a__ ) as mock_cast_to_python_objects:
            UpperCAmelCase_ : Any = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] ,type=Image() ) )
        UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = mock_cast_to_python_objects.call_args_list[-1]
        self.assertIn("""optimize_list_casting""" ,a__ )
        self.assertFalse(kwargs["""optimize_list_casting"""] )


def lowerCamelCase_ ( _a : Any , _a : int ):
    '''simple docstring'''
    UpperCAmelCase_ : List[str] = pa.BufferReader(_a ) if isinstance(_a , pa.Buffer ) else pa.memory_map(_a )
    UpperCAmelCase_ : Dict = pa.ipc.open_stream(_a )
    UpperCAmelCase_ : Optional[Any] = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def lowerCamelCase_ ( _a : Dict , _a : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = pa.BufferOutputStream()
    UpperCAmelCase_ : int = pa.schema(_a ) if fields else None
    with ArrowWriter(stream=_a , schema=_a , writer_batch_size=_a ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        UpperCAmelCase_ , UpperCAmelCase_ : str = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        UpperCAmelCase_ : Any = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
    assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )


def lowerCamelCase_ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : Union[str, Any] = pa.BufferOutputStream()
    UpperCAmelCase_ : str = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
    with ArrowWriter(stream=_a , features=_a ) as writer:
        writer.write({"""labels""": 0} )
        writer.write({"""labels""": 1} )
        UpperCAmelCase_ , UpperCAmelCase_ : Dict = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    UpperCAmelCase_ : Union[str, Any] = pa.BufferReader(output.getvalue() )
    UpperCAmelCase_ : Dict = pa.ipc.open_stream(_a )
    UpperCAmelCase_ : Dict = f.read_all()
    UpperCAmelCase_ : Optional[Any] = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(_a )


@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
def lowerCamelCase_ ( _a : Optional[Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[Any] = pa.BufferOutputStream()
    with ArrowWriter(
        stream=_a , writer_batch_size=_a , hash_salt="""split_name""" , check_duplicates=_a , ) as writer:
        with pytest.raises(_a ):
            writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
        UpperCAmelCase_ , UpperCAmelCase_ : str = writer.finalize()


@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def lowerCamelCase_ ( _a : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : str = pa.BufferOutputStream()
    with ArrowWriter(
        stream=_a , writer_batch_size=_a , hash_salt="""split_name""" , check_duplicates=_a , ) as writer:
        with pytest.raises(_a ):
            writer.write({"""col_1""": """foo""", """col_2""": 1} , key=10 )
            writer.write({"""col_1""": """bar""", """col_2""": 2} , key=10 )
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = writer.finalize()


@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 10] )
def lowerCamelCase_ ( _a : Any ):
    '''simple docstring'''
    UpperCAmelCase_ : int = pa.BufferOutputStream()
    with ArrowWriter(
        stream=_a , writer_batch_size=_a , hash_salt="""split_name""" , check_duplicates=_a , ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
        writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
        UpperCAmelCase_ , UpperCAmelCase_ : str = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )


@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def lowerCamelCase_ ( _a : Union[str, Any] , _a : List[str] ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = pa.BufferOutputStream()
    UpperCAmelCase_ : Any = pa.schema(_a ) if fields else None
    with ArrowWriter(stream=_a , schema=_a , writer_batch_size=_a ) as writer:
        writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
        writer.write_batch({"""col_1""": [], """col_2""": []} )
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        UpperCAmelCase_ : List[str] = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
    assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )


@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def lowerCamelCase_ ( _a : str , _a : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : str = pa.BufferOutputStream()
    UpperCAmelCase_ : Optional[Any] = pa.schema(_a ) if fields else None
    with ArrowWriter(stream=_a , schema=_a , writer_batch_size=_a ) as writer:
        writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
        UpperCAmelCase_ , UpperCAmelCase_ : Any = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        UpperCAmelCase_ : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
    assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )


@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 10] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def lowerCamelCase_ ( _a : Optional[Any] , _a : Optional[int] ):
    '''simple docstring'''
    UpperCAmelCase_ : List[str] = pa.BufferOutputStream()
    UpperCAmelCase_ : Optional[Any] = pa.schema(_a ) if fields else None
    with ArrowWriter(stream=_a , schema=_a , writer_batch_size=_a ) as writer:
        writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
        writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
        UpperCAmelCase_ , UpperCAmelCase_ : Dict = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        UpperCAmelCase_ : Dict = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
    assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )


def lowerCamelCase_ ( ):
    '''simple docstring'''
    with tempfile.TemporaryDirectory() as tmp_dir:
        UpperCAmelCase_ : Tuple = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
        UpperCAmelCase_ : int = os.path.join(_a , """test.arrow""" )
        with ArrowWriter(path=_a , schema=pa.schema(_a ) ) as writer:
            writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
            UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(_a , metadata=writer._schema.metadata )
        _check_output(_a , 1 )


def lowerCamelCase_ ( _a : Tuple ):
    '''simple docstring'''
    if pa.types.is_list(_a ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type


def lowerCamelCase_ ( _a : Tuple , _a : Dict ):
    '''simple docstring'''
    if isinstance(lst[0] , _a ):
        change_first_primitive_element_in_list(lst[0] , _a )
    else:
        UpperCAmelCase_ : Optional[int] = value


@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase_ ( _a : List[Any] , _a : Dict , _a : int ):
    '''simple docstring'''
    UpperCAmelCase_ : List[Any] = pa.array(TypedSequence(_a , optimized_int_type=_a ) )
    assert get_base_dtype(arr.type ) == expected_dtype


@pytest.mark.parametrize(
    """col, expected_dtype""" , [
        ("""attention_mask""", pa.inta()),
        ("""special_tokens_mask""", pa.inta()),
        ("""token_type_ids""", pa.inta()),
        ("""input_ids""", pa.intaa()),
        ("""other""", pa.intaa()),
    ] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def lowerCamelCase_ ( _a : Optional[int] , _a : Any , _a : Dict ):
    '''simple docstring'''
    UpperCAmelCase_ : Optional[int] = pa.array(OptimizedTypedSequence(_a , col=_a ) )
    assert get_base_dtype(arr.type ) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        UpperCAmelCase_ : str = copy.deepcopy(_a )
        UpperCAmelCase_ : Union[str, Any] = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(_a , _a )
        UpperCAmelCase_ : Tuple = pa.array(OptimizedTypedSequence(_a , col=_a ) )
        assert get_base_dtype(arr.type ) == pa.intaa()


@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def lowerCamelCase_ ( _a : List[str] , _a : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : int = str(tmp_path / """dataset-train.arrow""" )
    try:
        with ArrowWriter(path=_a ) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def lowerCamelCase_ ( _a : Union[str, Any] ):
    '''simple docstring'''
    UpperCAmelCase_ : Dict = """mock://dataset-train.arrow"""
    with ArrowWriter(path=_a , storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs , type(_a ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(_a )


def lowerCamelCase_ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : str = pa.BufferOutputStream()
    with ParquetWriter(stream=_a ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        UpperCAmelCase_ , UpperCAmelCase_ : Tuple = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    UpperCAmelCase_ : int = pa.BufferReader(output.getvalue() )
    UpperCAmelCase_ : str = pq.read_table(_a )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def lowerCamelCase_ ( _a : Optional[int] , _a : List[str] ):
    '''simple docstring'''
    import PIL.Image

    UpperCAmelCase_ : Any = str(tmp_path / """test_image_rgb.jpg""" )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_a , format="""png""" )
    UpperCAmelCase_ : List[Any] = pa.BufferOutputStream()
    with ParquetWriter(
        stream=_a , features=Features({"""image""": Image()} ) , embed_local_files=_a ) as writer:
        writer.write({"""image""": image_path} )
        writer.finalize()
    UpperCAmelCase_ : Dict = pa.BufferReader(output.getvalue() )
    UpperCAmelCase_ : str = pq.read_table(_a )
    UpperCAmelCase_ : Optional[Any] = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["""image"""][0]["""path"""] , _a )
        with open(_a , """rb""" ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def lowerCamelCase_ ( ):
    '''simple docstring'''
    UpperCAmelCase_ : List[str] = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_a )] )
    UpperCAmelCase_ : List[Any] = pa.BufferOutputStream()
    with ArrowWriter(stream=_a ) as writer:
        writer._build_writer(inferred_schema=_a )
    assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
345
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Dict = { "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json", "BridgeTower/bridgetower-base-itm-mlm": ( "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json" ), } class _snake_case ( lowercase_ ): lowerCAmelCase_ : Dict = "bridgetower_vision_model" def __init__( self , a__=768 , a__=12 , a__=3 , a__=16 , a__=288 , a__=1 , a__=1e-05 , a__=False , a__=True , a__=False , **a__ , ) -> int: '''simple docstring''' super().__init__(**a__ ) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_channels snake_case_ = patch_size snake_case_ = image_size snake_case_ = initializer_factor snake_case_ = layer_norm_eps snake_case_ = stop_gradient snake_case_ = share_layernorm snake_case_ = remove_last_layer @classmethod def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig": '''simple docstring''' snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ ) if config_dict.get("model_type" ) == "bridgetower": snake_case_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(a__ , **a__ ) class _snake_case ( lowercase_ ): lowerCAmelCase_ : Any = "bridgetower_text_model" def __init__( self , a__=50_265 , a__=768 , a__=12 , a__=12 , a__=1 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=514 , a__=1 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , **a__ , ) -> Optional[int]: '''simple docstring''' super().__init__(**a__ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = initializer_factor snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = layer_norm_eps snake_case_ = position_embedding_type snake_case_ = use_cache snake_case_ = pad_token_id snake_case_ = bos_token_id snake_case_ = eos_token_id @classmethod def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig": '''simple docstring''' snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ ) if config_dict.get("model_type" ) == "bridgetower": snake_case_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(a__ , **a__ ) class _snake_case ( lowercase_ ): lowerCAmelCase_ : Union[str, Any] = "bridgetower" def __init__( self , a__=True , a__="gelu" , a__=768 , a__=1 , a__=1e-05 , a__=False , a__="add" , a__=12 , a__=6 , a__=False , a__=False , a__=None , a__=None , **a__ , ) -> int: '''simple docstring''' snake_case_ = kwargs.pop("text_config_dict" , a__ ) snake_case_ = kwargs.pop("vision_config_dict" , a__ ) super().__init__(**a__ ) snake_case_ = share_cross_modal_transformer_layers snake_case_ = hidden_act snake_case_ = hidden_size snake_case_ = initializer_factor snake_case_ = layer_norm_eps snake_case_ = share_link_tower_layers snake_case_ = link_tower_type snake_case_ = num_attention_heads snake_case_ = num_hidden_layers snake_case_ = tie_word_embeddings snake_case_ = init_layernorm_from_vision_encoder if text_config is None: snake_case_ = {} logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." ) if vision_config is None: snake_case_ = {} logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." ) snake_case_ = BridgeTowerTextConfig(**a__ ) snake_case_ = BridgeTowerVisionConfig(**a__ ) @classmethod def lowerCAmelCase__ ( cls , a__ , a__ , **a__ ) -> List[Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ ) def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.text_config.to_dict() snake_case_ = self.vision_config.to_dict() snake_case_ = self.__class__.model_type return output
85
0
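The typed-sequence tests in the first sample hinge on one pyarrow behavior: `input_ids`-style columns stay 64-bit while mask-like columns can be stored as 8-bit integers until a value falls out of range. A minimal standalone sketch of that downcasting idea, using only public pyarrow calls; the column-to-type table below is illustrative, not the library's internal one:

import pyarrow as pa

# Hypothetical mapping from column name to a compact integer type.
COLUMN_TYPES = {"attention_mask": pa.int8(), "token_type_ids": pa.int8()}

def typed_array(values, col=None):
    # Fall back to pyarrow's default inference (int64 for Python ints)
    # when the column has no registered compact type.
    return pa.array(values, type=COLUMN_TYPES.get(col))

assert typed_array([1, 0, 1], col="attention_mask").type == pa.int8()
assert typed_array([101, 2023, 102]).type == pa.int64()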
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowercase_ = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["XGLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = ["XGLMTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XGLMForCausalLM", "XGLMModel", "XGLMPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "FlaxXGLMForCausalLM", "FlaxXGLMModel", "FlaxXGLMPreTrainedModel", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowercase_ = [ "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXGLMForCausalLM", "TFXGLMModel", "TFXGLMPreTrainedModel", ] if TYPE_CHECKING: from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm import XGLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xglm_fast import XGLMTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xglm import ( TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXGLMForCausalLM, TFXGLMModel, TFXGLMPreTrainedModel, ) else: import sys lowercase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
58
from __future__ import annotations


def all_unique(values: list[int]) -> bool:
    """Return True when every element of ``values`` occurs exactly once."""
    return len(set(values)) == len(values)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
0
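The XGLM `__init__` above repeats one probe-and-register step per optional backend (sentencepiece, tokenizers, torch, flax, tf). A reduced sketch of that pattern, with `importlib.util.find_spec` standing in for the library's own availability helpers:

import importlib.util

def backend_available(name: str) -> bool:
    # find_spec returns None when the package cannot be imported.
    return importlib.util.find_spec(name) is not None

_import_structure = {"configuration_xglm": ["XGLMConfig"]}
if backend_available("torch"):
    _import_structure["modeling_xglm"] = ["XGLMModel", "XGLMForCausalLM"]

print(_import_structure)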
import os
import unittest

from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")


class BartphoTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BartphoTokenizer
    test_rust_tokenizer = False
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.monolingual_vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["monolingual_vocab_file"])
        with open(self.monolingual_vocab_file, "w", encoding="utf-8") as fp:
            for token in vocab_tokens:
                fp.write(f"{token} {vocab_tokens[token]}\n")

        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return BartphoTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "This is a là test"
        output_text = "This is a<unk><unk> test"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BartphoTokenizer(SAMPLE_VOCAB, self.monolingual_vocab_file, **self.special_tokens_map)
        text = "This is a là test"
        bpe_tokens = "▁This ▁is ▁a ▁l à ▁t est".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [4, 5, 6, 3, 3, 7, 8, 3]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
147
'''simple docstring''' import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP _SCREAMING_SNAKE_CASE : Any = False try: _SCREAMING_SNAKE_CASE : Optional[Any] = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class _snake_case : def __init__( self , a__ = None , a__ = [] ) -> List[str]: '''simple docstring''' snake_case_ = 0 snake_case_ = choices snake_case_ = prompt if sys.platform == "win32": snake_case_ = "*" else: snake_case_ = "➔ " def lowerCAmelCase__ ( self , a__ , a__ = "" ) -> int: '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index] , 32 , a__ ) else: forceWrite(self.choices[index] , a__ ) def lowerCAmelCase__ ( self , a__ ) -> Tuple: '''simple docstring''' if index == self.position: forceWrite(F' {self.arrow_char} ' ) self.write_choice(a__ ) else: forceWrite(F' {self.choices[index]}' ) reset_cursor() def lowerCAmelCase__ ( self , a__ , a__ = 1 ) -> List[str]: '''simple docstring''' snake_case_ = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(a__ ) move_cursor(a__ , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["up"] ) def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' self.move_direction(Direction.UP ) @input.mark(KEYMAP["down"] ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["newline"] ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' move_cursor(len(self.choices ) - self.position , "DOWN" ) return self.position @input.mark(KEYMAP["interrupt"] ) def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' move_cursor(len(self.choices ) - self.position , "DOWN" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(a__ )] for number in range(10 )] ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ = int(chr(self.current_selection ) ) snake_case_ = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , a__ ) else: return else: return def lowerCAmelCase__ ( self , a__ = 0 ) -> List[str]: '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt , "\n" ) if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" ) else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" ) snake_case_ = default_choice for i in range(len(self.choices ) ): self.print_choice(a__ ) forceWrite("\n" ) move_cursor(len(self.choices ) - self.position , "UP" ) with cursor.hide(): while True: if in_colab: try: snake_case_ = int(builtins.input() ) except ValueError: snake_case_ = default_choice else: snake_case_ = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , "UP" ) clear_line() self.write_choice(a__ , "\n" ) return choice
85
0
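The tokenizer fixture in the previous sample builds its monolingual vocabulary as a token-to-id table written one `token id` pair per line; the table itself is just `dict(zip(...))` over an ordered token list. Self-contained:

vocab = ["▁This", "▁is", "▁a", "▁t", "est"]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
# {'▁This': 0, '▁is': 1, '▁a': 2, '▁t': 3, 'est': 4}

lines = [f"{token} {index}" for token, index in vocab_tokens.items()]
assert lines[0] == "▁This 0"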
from __future__ import annotations

import collections
import pprint
from pathlib import Path


def signature(word: str) -> str:
    """Sort the letters of a word to get a canonical anagram signature."""
    return "".join(sorted(word))


def anagram(my_word: str) -> list[str]:
    """Return every dictionary word sharing ``my_word``'s signature."""
    return word_by_signature[signature(my_word)]


data = Path(__file__).parent.joinpath("words.txt").read_text(encoding="utf-8")
word_list = sorted({word.strip().lower() for word in data.splitlines()})

word_by_signature = collections.defaultdict(list)
for word in word_list:
    word_by_signature[signature(word)].append(word)

if __name__ == "__main__":
    all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1}
    with open("anagrams.txt", "w") as file:
        file.write("all_anagrams = \n ")
        file.write(pprint.pformat(all_anagrams))
140
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(args):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))
        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
85
0
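The anagram script works because sorting a word's letters yields a signature shared by every anagram of that word, so a single pass over the dictionary buckets all of them together. A compact demonstration:

import collections

def anagram_signature(word: str) -> str:
    return "".join(sorted(word))

words = ["listen", "silent", "enlist", "google"]
groups = collections.defaultdict(list)
for word in words:
    groups[anagram_signature(word)].append(word)

assert groups[anagram_signature("listen")] == ["listen", "silent", "enlist"]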
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging UpperCAmelCase_ : List[str] = logging.get_logger(__name__) class lowerCAmelCase__ ( lowercase_ ): '''simple docstring''' __UpperCamelCase = ["input_values", "padding_mask"] def __init__( self : Union[str, Any] , lowercase_ : Optional[int] = 1 , lowercase_ : str = 24000 , lowercase_ : Dict = 0.0 , lowercase_ : Dict = None , lowercase_ : Any = None , **lowercase_ : Tuple , ): '''simple docstring''' super().__init__(feature_size=a__ , sampling_rate=a__ , padding_value=a__ , **a__) SCREAMING_SNAKE_CASE_ : Any = chunk_length_s SCREAMING_SNAKE_CASE_ : Optional[int] = overlap @property def _SCREAMING_SNAKE_CASE ( self : Any): '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate) @property def _SCREAMING_SNAKE_CASE ( self : List[str]): '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 , int((1.0 - self.overlap) * self.chunk_length)) def __call__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Optional[int] = None , lowercase_ : Dict = False , lowercase_ : List[Any] = None , lowercase_ : Any = None , lowercase_ : Tuple = None , ): '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of' F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with' F' {self.sampling_rate} and not {sampling_rate}.') else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''') if padding and truncation: raise ValueError('''Both padding and truncation were set. 
Make sure you only set one.''') elif padding is None: # by default let's pad the inputs SCREAMING_SNAKE_CASE_ : Tuple = True SCREAMING_SNAKE_CASE_ : Any = bool( isinstance(a__ , (list, tuple)) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list)))) if is_batched: SCREAMING_SNAKE_CASE_ : int = [np.asarray(a__ , dtype=np.floataa).T for audio in raw_audio] elif not is_batched and not isinstance(a__ , np.ndarray): SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.asarray(a__ , dtype=np.floataa) elif isinstance(a__ , np.ndarray) and raw_audio.dtype is np.dtype(np.floataa): SCREAMING_SNAKE_CASE_ : Any = raw_audio.astype(np.floataa) # always return batch if not is_batched: SCREAMING_SNAKE_CASE_ : Any = [np.asarray(a__).T] # verify inputs are valid for idx, example in enumerate(a__): if example.ndim > 2: raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}') if self.feature_size == 1 and example.ndim != 1: raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels') if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels') SCREAMING_SNAKE_CASE_ : int = None SCREAMING_SNAKE_CASE_ : Optional[int] = BatchFeature({'''input_values''': raw_audio}) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: SCREAMING_SNAKE_CASE_ : Dict = min(array.shape[0] for array in raw_audio) SCREAMING_SNAKE_CASE_ : str = int(np.floor(max_length / self.chunk_stride)) SCREAMING_SNAKE_CASE_ : List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: SCREAMING_SNAKE_CASE_ : List[Any] = max(array.shape[0] for array in raw_audio) SCREAMING_SNAKE_CASE_ : List[Any] = int(np.ceil(max_length / self.chunk_stride)) SCREAMING_SNAKE_CASE_ : str = (nb_step - 1) * self.chunk_stride + self.chunk_length SCREAMING_SNAKE_CASE_ : Any = '''max_length''' else: SCREAMING_SNAKE_CASE_ : List[str] = input_values # normal padding on batch if padded_inputs is None: SCREAMING_SNAKE_CASE_ : Optional[int] = self.pad( a__ , max_length=a__ , truncation=a__ , padding=a__ , return_attention_mask=a__ , ) if padding: SCREAMING_SNAKE_CASE_ : int = padded_inputs.pop('''attention_mask''') SCREAMING_SNAKE_CASE_ : str = [] for example in padded_inputs.pop('''input_values'''): if self.feature_size == 1: SCREAMING_SNAKE_CASE_ : Optional[Any] = example[..., None] input_values.append(example.T) SCREAMING_SNAKE_CASE_ : Optional[int] = input_values if return_tensors is not None: SCREAMING_SNAKE_CASE_ : List[Any] = padded_inputs.convert_to_tensors(a__) return padded_inputs
91
import os

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0
    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]
    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""
    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"
    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0
    # The data file is expected to live next to this script.
    with open(os.path.dirname(__file__) + roman_numerals_filename) as file_handle:
        lines = file_handle.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)
    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
85
0
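The subtractive rule is the only subtlety in the roman-numeral parser above: a symbol strictly smaller than its right neighbour is subtracted instead of added. A worked check, with the hand arithmetic in the comment:

SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}

def parse_roman(numerals: str) -> int:
    total = 0
    for cur, nxt in zip(numerals, numerals[1:]):
        total += -SYMBOLS[cur] if SYMBOLS[cur] < SYMBOLS[nxt] else SYMBOLS[cur]
    return total + SYMBOLS[numerals[-1]]

# M CM XC IV  ->  1000 + (1000 - 100) + (100 - 10) + (5 - 1)  ==  1994
assert parse_roman("MCMXCIV") == 1994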
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig __A : Any = { "albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json", "albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json", "albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json", "albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json", "albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json", "albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json", "albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json", "albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json", } class A_ (lowercase_ ): UpperCAmelCase__ = "albert" def __init__( self , _A=3_0_0_0_0 , _A=1_2_8 , _A=4_0_9_6 , _A=1_2 , _A=1 , _A=6_4 , _A=1_6_3_8_4 , _A=1 , _A="gelu_new" , _A=0 , _A=0 , _A=5_1_2 , _A=2 , _A=0.02 , _A=1E-12 , _A=0.1 , _A="absolute" , _A=0 , _A=2 , _A=3 , **_A , ): '''simple docstring''' super().__init__(pad_token_id=a__ , bos_token_id=a__ , eos_token_id=a__ , **a__ ) UpperCAmelCase = vocab_size UpperCAmelCase = embedding_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_hidden_groups UpperCAmelCase = num_attention_heads UpperCAmelCase = inner_group_num UpperCAmelCase = hidden_act UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = initializer_range UpperCAmelCase = layer_norm_eps UpperCAmelCase = classifier_dropout_prob UpperCAmelCase = position_embedding_type class A_ (lowercase_ ): @property def _lowercase ( self ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: UpperCAmelCase = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
273
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _SCREAMING_SNAKE_CASE : int = { "configuration_clip": [ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP", "CLIPConfig", "CLIPOnnxConfig", "CLIPTextConfig", "CLIPVisionConfig", ], "processing_clip": ["CLIPProcessor"], "tokenization_clip": ["CLIPTokenizer"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Optional[int] = ["CLIPTokenizerFast"] try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Optional[Any] = ["CLIPFeatureExtractor"] _SCREAMING_SNAKE_CASE : Dict = ["CLIPImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[Any] = [ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "CLIPModel", "CLIPPreTrainedModel", "CLIPTextModel", "CLIPTextModelWithProjection", "CLIPVisionModel", "CLIPVisionModelWithProjection", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : List[str] = [ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCLIPModel", "TFCLIPPreTrainedModel", "TFCLIPTextModel", "TFCLIPVisionModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _SCREAMING_SNAKE_CASE : Any = [ "FlaxCLIPModel", "FlaxCLIPPreTrainedModel", "FlaxCLIPTextModel", "FlaxCLIPTextPreTrainedModel", "FlaxCLIPVisionModel", "FlaxCLIPVisionPreTrainedModel", ] if TYPE_CHECKING: from .configuration_clip import ( CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPConfig, CLIPOnnxConfig, CLIPTextConfig, CLIPVisionConfig, ) from .processing_clip import CLIPProcessor from .tokenization_clip import CLIPTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_clip_fast import CLIPTokenizerFast try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_clip import CLIPFeatureExtractor from .image_processing_clip import CLIPImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clip import ( CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPModel, CLIPPreTrainedModel, CLIPTextModel, CLIPTextModelWithProjection, CLIPVisionModel, CLIPVisionModelWithProjection, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_clip import ( TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST, TFCLIPModel, TFCLIPPreTrainedModel, TFCLIPTextModel, TFCLIPVisionModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_clip import ( FlaxCLIPModel, FlaxCLIPPreTrainedModel, FlaxCLIPTextModel, FlaxCLIPTextPreTrainedModel, FlaxCLIPVisionModel, FlaxCLIPVisionPreTrainedModel, ) else: import sys _SCREAMING_SNAKE_CASE : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
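The ONNX config in the ALBERT sample varies only its dynamic axes by task: multiple-choice inputs carry an extra `choice` dimension between batch and sequence. The mapping is easy to reproduce standalone:

from collections import OrderedDict

def onnx_inputs(task: str) -> OrderedDict:
    if task == "multiple-choice":
        axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", axis), ("attention_mask", axis), ("token_type_ids", axis)])

assert onnx_inputs("sequence-classification")["input_ids"] == {0: "batch", 1: "sequence"}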
'''simple docstring''' from dataclasses import dataclass from typing import Dict, Optional, Union import torch import torch.nn.functional as F from torch import nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .attention import BasicTransformerBlock from .attention_processor import AttentionProcessor, AttnProcessor from .embeddings import TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin @dataclass class snake_case__ ( lowercase_): a_ = 42 class snake_case__ ( lowercase_ , lowercase_): @register_to_config def __init__( self : Optional[int] , _A : Optional[int] = 32 , _A : List[Any] = 64 , _A : Optional[int] = 20 , _A : Any = 7_68 , _A : Optional[Any]=77 , _A : str=4 , _A : int = 0.0 , _A : Optional[int] = "silu" , _A : Optional[int] = None , _A : Optional[Any] = None , _A : Tuple = "linear" , _A : Any = "prd" , _A : Tuple = None , _A : List[Any] = None , _A : Optional[Any] = None , ) -> Tuple: super().__init__() UpperCAmelCase_ : str = num_attention_heads UpperCAmelCase_ : str = attention_head_dim UpperCAmelCase_ : List[Any] = num_attention_heads * attention_head_dim UpperCAmelCase_ : Dict = additional_embeddings UpperCAmelCase_ : Union[str, Any] = time_embed_dim or inner_dim UpperCAmelCase_ : List[Any] = embedding_proj_dim or embedding_dim UpperCAmelCase_ : Optional[Any] = clip_embed_dim or embedding_dim UpperCAmelCase_ : List[Any] = Timesteps(a__ , a__ , 0 ) UpperCAmelCase_ : int = TimestepEmbedding(a__ , a__ , out_dim=a__ , act_fn=a__ ) UpperCAmelCase_ : Any = nn.Linear(a__ , a__ ) if embedding_proj_norm_type is None: UpperCAmelCase_ : Union[str, Any] = None elif embedding_proj_norm_type == "layer": UpperCAmelCase_ : Dict = nn.LayerNorm(a__ ) else: raise ValueError(F"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}" ) UpperCAmelCase_ : Dict = nn.Linear(a__ , a__ ) if encoder_hid_proj_type is None: UpperCAmelCase_ : Dict = None elif encoder_hid_proj_type == "linear": UpperCAmelCase_ : Optional[Any] = nn.Linear(a__ , a__ ) else: raise ValueError(F"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}" ) UpperCAmelCase_ : Union[str, Any] = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , a__ ) ) if added_emb_type == "prd": UpperCAmelCase_ : List[str] = nn.Parameter(torch.zeros(1 , 1 , a__ ) ) elif added_emb_type is None: UpperCAmelCase_ : Dict = None else: raise ValueError( F"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`." ) UpperCAmelCase_ : Optional[int] = nn.ModuleList( [ BasicTransformerBlock( a__ , a__ , a__ , dropout=a__ , activation_fn='''gelu''' , attention_bias=a__ , ) for d in range(a__ ) ] ) if norm_in_type == "layer": UpperCAmelCase_ : Optional[Any] = nn.LayerNorm(a__ ) elif norm_in_type is None: UpperCAmelCase_ : Tuple = None else: raise ValueError(F"Unsupported norm_in_type: {norm_in_type}." ) UpperCAmelCase_ : Union[str, Any] = nn.LayerNorm(a__ ) UpperCAmelCase_ : Optional[Any] = nn.Linear(a__ , a__ ) UpperCAmelCase_ : Tuple = torch.full( [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_00_00.0 ) causal_attention_mask.triu_(1 ) UpperCAmelCase_ : Tuple = causal_attention_mask[None, ...] 
self.register_buffer('''causal_attention_mask''' , a__ , persistent=a__ ) UpperCAmelCase_ : List[str] = nn.Parameter(torch.zeros(1 , a__ ) ) UpperCAmelCase_ : List[str] = nn.Parameter(torch.zeros(1 , a__ ) ) @property # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors def A ( self : Dict ) -> Dict[str, AttentionProcessor]: UpperCAmelCase_ : List[Any] = {} def fn_recursive_add_processors(_A : Dict , _A : Tuple , _A : Optional[int] ): if hasattr(a__ , '''set_processor''' ): UpperCAmelCase_ : Any = module.processor for sub_name, child in module.named_children(): fn_recursive_add_processors(F"{name}.{sub_name}" , a__ , a__ ) return processors for name, module in self.named_children(): fn_recursive_add_processors(a__ , a__ , a__ ) return processors def A ( self : Tuple , _A : Any ) -> List[Any]: UpperCAmelCase_ : Tuple = len(self.attn_processors.keys() ) if isinstance(a__ , a__ ) and len(a__ ) != count: raise ValueError( F"A dict of processors was passed, but the number of processors {len(a__ )} does not match the" F" number of attention layers: {count}. Please make sure to pass {count} processor classes." ) def fn_recursive_attn_processor(_A : Optional[int] , _A : str , _A : Optional[int] ): if hasattr(a__ , '''set_processor''' ): if not isinstance(a__ , a__ ): module.set_processor(a__ ) else: module.set_processor(processor.pop(F"{name}.processor" ) ) for sub_name, child in module.named_children(): fn_recursive_attn_processor(F"{name}.{sub_name}" , a__ , a__ ) for name, module in self.named_children(): fn_recursive_attn_processor(a__ , a__ , a__ ) def A ( self : Dict ) -> int: self.set_attn_processor(AttnProcessor() ) def A ( self : Optional[Any] , _A : int , _A : Optional[int] , _A : Dict , _A : str = None , _A : List[Any] = None , _A : Tuple = True , ) -> Dict: UpperCAmelCase_ : Union[str, Any] = hidden_states.shape[0] UpperCAmelCase_ : List[str] = timestep if not torch.is_tensor(a__ ): UpperCAmelCase_ : Union[str, Any] = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device ) elif torch.is_tensor(a__ ) and len(timesteps.shape ) == 0: UpperCAmelCase_ : Optional[int] = timesteps[None].to(hidden_states.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML UpperCAmelCase_ : int = timesteps * torch.ones(a__ , dtype=timesteps.dtype , device=timesteps.device ) UpperCAmelCase_ : Tuple = self.time_proj(a__ ) # timesteps does not contain any weights and will always return f32 tensors # but time_embedding might be fp16, so we need to cast here. 
UpperCAmelCase_ : List[Any] = timesteps_projected.to(dtype=self.dtype ) UpperCAmelCase_ : Optional[Any] = self.time_embedding(a__ ) if self.embedding_proj_norm is not None: UpperCAmelCase_ : Dict = self.embedding_proj_norm(a__ ) UpperCAmelCase_ : Optional[int] = self.embedding_proj(a__ ) if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None: UpperCAmelCase_ : Tuple = self.encoder_hidden_states_proj(a__ ) elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None: raise ValueError('''`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set''' ) UpperCAmelCase_ : List[Any] = self.proj_in(a__ ) UpperCAmelCase_ : Dict = self.positional_embedding.to(hidden_states.dtype ) UpperCAmelCase_ : Optional[Any] = [] UpperCAmelCase_ : int = 0 if encoder_hidden_states is not None: additional_embeds.append(a__ ) additional_embeddings_len += encoder_hidden_states.shape[1] if len(proj_embeddings.shape ) == 2: UpperCAmelCase_ : Union[str, Any] = proj_embeddings[:, None, :] if len(hidden_states.shape ) == 2: UpperCAmelCase_ : Optional[Any] = hidden_states[:, None, :] UpperCAmelCase_ : Dict = additional_embeds + [ proj_embeddings, time_embeddings[:, None, :], hidden_states, ] if self.prd_embedding is not None: UpperCAmelCase_ : Any = self.prd_embedding.to(hidden_states.dtype ).expand(a__ , -1 , -1 ) additional_embeds.append(a__ ) UpperCAmelCase_ : Any = torch.cat( a__ , dim=1 , ) # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens UpperCAmelCase_ : Optional[Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1 if positional_embeddings.shape[1] < hidden_states.shape[1]: UpperCAmelCase_ : Tuple = F.pad( a__ , ( 0, 0, additional_embeddings_len, self.prd_embedding.shape[1] if self.prd_embedding is not None else 0, ) , value=0.0 , ) UpperCAmelCase_ : str = hidden_states + positional_embeddings if attention_mask is not None: UpperCAmelCase_ : Dict = (1 - attention_mask.to(hidden_states.dtype )) * -1_00_00.0 UpperCAmelCase_ : Union[str, Any] = F.pad(a__ , (0, self.additional_embeddings) , value=0.0 ) UpperCAmelCase_ : List[Any] = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype ) UpperCAmelCase_ : Any = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 ) if self.norm_in is not None: UpperCAmelCase_ : str = self.norm_in(a__ ) for block in self.transformer_blocks: UpperCAmelCase_ : Tuple = block(a__ , attention_mask=a__ ) UpperCAmelCase_ : Tuple = self.norm_out(a__ ) if self.prd_embedding is not None: UpperCAmelCase_ : Any = hidden_states[:, -1] else: UpperCAmelCase_ : Union[str, Any] = hidden_states[:, additional_embeddings_len:] UpperCAmelCase_ : Dict = self.proj_to_clip_embeddings(a__ ) if not return_dict: return (predicted_image_embedding,) return PriorTransformerOutput(predicted_image_embedding=a__ ) def A ( self : Optional[Any] , _A : List[Any] ) -> str: UpperCAmelCase_ : List[str] = (prior_latents * self.clip_std) + self.clip_mean return prior_latents
304
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _SCREAMING_SNAKE_CASE : Union[str, Any] = { "tokenizer_file": { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json", }, } _SCREAMING_SNAKE_CASE : int = { "gpt-neox-20b": 2048, } class _snake_case ( lowercase_ ): lowerCAmelCase_ : str = VOCAB_FILES_NAMES lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ : str = ["input_ids", "attention_mask"] def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple: '''simple docstring''' super().__init__( a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space: snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) ) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**a__ ) snake_case_ = add_prefix_space def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]: '''simple docstring''' snake_case_ = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ ) def lowerCAmelCase__ ( self , a__ ) -> List[int]: '''simple docstring''' snake_case_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] ) if len(a__ ) > self.model_max_length: snake_case_ = input_ids[-self.model_max_length :] return input_ids
85
0
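The prior transformer above builds its causal mask once at construction time: a square of large negative values kept strictly above the diagonal, later added to attention scores so every position ignores the positions after it. A small torch sketch; the sequence length of 4 is arbitrary:

import torch

seq_len = 4
causal_mask = torch.full((seq_len, seq_len), -10000.0).triu_(1)
# Row i keeps 0.0 at positions <= i and -10000.0 at future positions.
assert causal_mask[0, 1] == -10000.0
assert causal_mask[1, 0] == 0.0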
"""simple docstring""" import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class __snake_case ( unittest.TestCase ): def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' a__: Dict = logging.get_logger() # the current default level is logging.WARNING a__: Dict = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity()) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity()) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity()) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity()) # restore to the original level logging.set_verbosity(a__) def lowerCamelCase_ ( self) -> str: '''simple docstring''' a__: Tuple = logging.get_verbosity() a__: Tuple = logging.get_logger('transformers.models.bart.tokenization_bart') a__: Union[str, Any] = 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(a__) as cl: logger.warning(a__) self.assertEqual(cl.out , msg + '\n') # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(a__) as cl: logger.warning(a__) self.assertEqual(cl.out , '') # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(a__) as cl: logger.warning(a__) self.assertEqual(cl.out , msg + '\n') # restore to the original level logging.set_verbosity(a__) @mockenv(TRANSFORMERS_VERBOSITY='error') def lowerCamelCase_ ( self) -> int: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() # this action activates the env var a__: Any = logging.get_logger('transformers.models.bart.tokenization_bart') a__: Tuple = os.getenv('TRANSFORMERS_VERBOSITY' , a__) a__: Tuple = logging.log_levels[env_level_str] a__: List[Any] = logging.get_verbosity() self.assertEqual( a__ , a__ , f'TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}' , ) # restore to the original level a__: Tuple = '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error') def lowerCamelCase_ ( self) -> Any: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() a__: str = logging.logging.getLogger() with CaptureLogger(a__) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart') self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out) # no need to restore as nothing was changed def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' transformers.utils.logging._reset_library_root_logger() a__: Optional[Any] = logging.get_logger('transformers.models.bart.tokenization_bart') a__: str = 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1'): # nothing should be logged as env var disables this method with CaptureLogger(a__) as cl: logger.warning_advice(a__) self.assertEqual(cl.out , '') with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=''): # should log normally as 
TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(a__) as cl: logger.warning_advice(a__) self.assertEqual(cl.out , msg + '\n') def __a ( ) ->Optional[Any]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
290
import os
import time

import pytest

from datasets.utils.filelock import FileLock, Timeout


def test_filelock(tmpdir):
    lock_a = FileLock(str(tmpdir / "foo.lock"))
    lock_b = FileLock(str(tmpdir / "foo.lock"))
    timeout = 0.01
    with lock_a.acquire():
        with pytest.raises(Timeout):
            _start = time.time()
            lock_b.acquire(timeout)
        assert time.time() - _start > timeout


def test_long_path(tmpdir):
    filename = "a" * 1000 + ".lock"
    lock_a = FileLock(str(tmpdir / filename))
    assert lock_a._lock_file.endswith(".lock")
    assert not lock_a._lock_file.endswith(filename)
    assert len(os.path.basename(lock_a._lock_file)) <= 255
    lock_b = FileLock(tmpdir / filename)
    with lock_a.acquire():
        with pytest.raises(Timeout):
            lock_b.acquire(0)
85
0
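The lock tests above pin down two behaviors: an `acquire` that times out while another lock object holds the file, and lock-file basenames clipped to the 255-byte filesystem limit. Assuming `datasets` is installed, the timeout path can be reproduced directly against the module under test:

import tempfile
import time
from pathlib import Path

from datasets.utils.filelock import FileLock, Timeout

tmp = Path(tempfile.mkdtemp())
lock_a = FileLock(str(tmp / "demo.lock"))
lock_b = FileLock(str(tmp / "demo.lock"))

with lock_a.acquire():
    start = time.time()
    try:
        lock_b.acquire(timeout=0.01)  # lock_a still holds the file
    except Timeout:
        print(f"timed out after {time.time() - start:.3f}s, as the tests expect")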
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer lowercase : Union[str, Any] = logging.getLogger(__name__) def A_ ( ) -> Optional[int]: a__ : Dict = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' , type=A__ , default='wikitext' , help='Name of the training. Explore datasets at: hf.co/datasets.' , ) parser.add_argument( '--dataset_config' , type=A__ , default='wikitext-103-raw-v1' , help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' , type=A__ , default='sayakpaul/unigram-tokenizer-wikitext' , help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' , ) parser.add_argument( '--shard_size' , type=A__ , default=1000 , help='Number of entries to go in a single shard.' , ) parser.add_argument('--split' , type=A__ , default='train' , choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' , default=A__ , type=A__ , help='Limit the number of shards (used for debugging).' , ) parser.add_argument( '--max_length' , type=A__ , default=512 , help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' , ) parser.add_argument( '--output_dir' , default='tf-tpu' , type=A__ , help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' , ) a__ : Any = parser.parse_args() return args def A_ ( A__ ) -> Optional[int]: def fn(A__ ): return tokenizer(examples['text'] ) return fn def A_ ( A__ ) -> List[Any]: a__ : Union[str, Any] = [] for i in range(len(tokenized_data['input_ids'] ) ): a__ : Any = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } a__ : List[Any] = tf.train.Features(feature=A__ ) a__ : Tuple = tf.train.Example(features=A__ ) a__ : int = example.SerializeToString() records.append(A__ ) return records def A_ ( A__ ) -> Optional[int]: a__ : Optional[Any] = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split ) if args.limit is not None: a__ : Union[str, Any] = min(len(A__ ) , args.limit ) a__ : Optional[Any] = dataset.select(range(A__ ) ) print(F'Limiting the dataset to {args.limit} entries.' ) a__ : int = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) a__ : Optional[Any] = os.path.join(args.output_dir , args.split ) if not os.path.exists(A__ ): os.makedirs(A__ ) else: a__ : Optional[Any] = os.path.join(args.output_dir , args.split ) # Tokenize the whole dataset at once. a__ : int = tokenize_function(A__ ) a__ : Dict = dataset.map(A__ , batched=A__ , num_proc=4 , remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. 
When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(A__ ): # Concatenate all texts. a__ : int = {k: sum(examples[k] , [] ) for k in examples.keys()} a__ : List[Any] = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 a__ : List[str] = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. a__ : List[Any] = { k: [t[i : i + args.max_length] for i in range(0 , A__ , args.max_length )] for k, t in concatenated_examples.items() } return result a__ : Optional[Any] = dataset_tokenized.map(A__ , batched=A__ , batch_size=1000 , num_proc=4 ) a__ : Dict = 0 a__ : int = 0 for shard in range(0 , len(A__ ) , args.shard_size ): a__ : List[Any] = grouped_dataset[shard : shard + args.shard_size] a__ : Tuple = len(dataset_snapshot['input_ids'] ) a__ : Tuple = os.path.join(A__ , F'dataset-{shard_count}-{records_containing}.tfrecord' ) a__ : int = get_serialized_examples(A__ ) with tf.io.TFRecordWriter(A__ ) as out_file: for i in range(len(A__ ) ): a__ : List[Any] = serialized_examples[i] out_file.write(A__ ) print('Wrote file {} containing {} records'.format(A__ , A__ ) ) shard_count += 1 total_records += records_containing with open(F'split-{args.split}-records-count.txt' , 'w' ) as f: print(F'Total {args.split} records: {total_records}' , file=A__ ) if __name__ == "__main__": lowercase : Union[str, Any] = parse_args() main(args)
99
'''simple docstring''' import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int _SCREAMING_SNAKE_CASE : Any = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class _snake_case ( datasets.BuilderConfig ): lowerCAmelCase_ : Optional[datasets.Features] = None def UpperCamelCase_( snake_case : "pyspark.sql.DataFrame" , snake_case : List[int] , ): '''simple docstring''' import pyspark def generate_fn(): snake_case_ = df.select("*" , pyspark.sql.functions.spark_partition_id().alias("part_id" ) ) for partition_id in partition_order: snake_case_ = df_with_partition_id.select("*" ).where(f'part_id = {partition_id}' ).drop("part_id" ) snake_case_ = partition_df.collect() snake_case_ = 0 for row in rows: yield f'{partition_id}_{row_id}', row.asDict() row_id += 1 return generate_fn class _snake_case ( _BaseExamplesIterable ): def __init__( self , a__ , a__=None , ) -> Any: '''simple docstring''' snake_case_ = df snake_case_ = partition_order or range(self.df.rdd.getNumPartitions() ) snake_case_ = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self ) -> Union[str, Any]: '''simple docstring''' yield from self.generate_examples_fn() def lowerCAmelCase__ ( self , a__ ) -> "SparkExamplesIterable": '''simple docstring''' snake_case_ = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(a__ ) return SparkExamplesIterable(self.df , partition_order=a__ ) def lowerCAmelCase__ ( self , a__ , a__ ) -> "SparkExamplesIterable": '''simple docstring''' snake_case_ = self.split_shard_indices_by_worker(a__ , a__ ) return SparkExamplesIterable(self.df , partition_order=a__ ) @property def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' return len(self.partition_order ) class _snake_case ( datasets.DatasetBuilder ): lowerCAmelCase_ : Dict = SparkConfig def __init__( self , a__ , a__ = None , a__ = None , **a__ , ) -> str: '''simple docstring''' import pyspark snake_case_ = pyspark.sql.SparkSession.builder.getOrCreate() snake_case_ = df snake_case_ = working_dir super().__init__( cache_dir=a__ , config_name=str(self.df.semanticHash() ) , **a__ , ) def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' def create_cache_and_write_probe(a__ ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. os.makedirs(self._cache_dir , exist_ok=a__ ) snake_case_ = os.path.join(self._cache_dir , "fs_test" + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(a__ , "a" ) return [probe_file] if self._spark.conf.get("spark.master" , "" ).startswith("local" ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. 
if self._cache_dir: snake_case_ = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(a__ ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( "When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir" ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def lowerCAmelCase__ ( self , a__ ) -> Optional[Any]: '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def lowerCAmelCase__ ( self , a__ ) -> Union[str, Any]: '''simple docstring''' import pyspark def get_arrow_batch_size(a__ ): for batch in it: yield pa.RecordBatch.from_pydict({"batch_bytes": [batch.nbytes]} ) snake_case_ = self.df.count() snake_case_ = df_num_rows if df_num_rows <= 100 else 100 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. snake_case_ = ( self.df.limit(a__ ) .repartition(1 ) .mapInArrow(a__ , "batch_bytes: long" ) .agg(pyspark.sql.functions.sum("batch_bytes" ).alias("sample_bytes" ) ) .collect()[0] .sample_bytes / sample_num_rows ) snake_case_ = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. snake_case_ = min(a__ , int(approx_total_size / max_shard_size ) ) snake_case_ = self.df.repartition(a__ ) def lowerCAmelCase__ ( self , a__ , a__ , a__ , ) -> Iterable[Tuple[int, bool, Union[int, tuple]]]: '''simple docstring''' import pyspark snake_case_ = ParquetWriter if file_format == "parquet" else ArrowWriter snake_case_ = os.path.join(self._working_dir , os.path.basename(a__ ) ) if self._working_dir else fpath snake_case_ = file_format == "parquet" # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. snake_case_ = self.config.features snake_case_ = self._writer_batch_size snake_case_ = self._fs.storage_options def write_arrow(a__ ): # Within the same SparkContext, no two task attempts will share the same attempt ID. snake_case_ = pyspark.TaskContext().taskAttemptId() snake_case_ = next(a__ , a__ ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=["task_id", "num_examples", "num_bytes"] , ) snake_case_ = 0 snake_case_ = writer_class( features=a__ , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , ) snake_case_ = pa.Table.from_batches([first_batch] ) writer.write_table(a__ ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: snake_case_ , snake_case_ = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) shard_id += 1 snake_case_ = writer_class( features=writer._features , path=working_fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , writer_batch_size=a__ , storage_options=a__ , embed_local_files=a__ , ) snake_case_ = pa.Table.from_batches([batch] ) writer.write_table(a__ ) if writer._num_bytes > 0: snake_case_ , snake_case_ = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=["task_id", "num_examples", "num_bytes"] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(a__ ) ): snake_case_ = os.path.join(os.path.dirname(a__ ) , os.path.basename(a__ ) ) shutil.move(a__ , a__ ) snake_case_ = ( self.df.mapInArrow(a__ , "task_id: long, num_examples: long, num_bytes: long" ) .groupBy("task_id" ) .agg( pyspark.sql.functions.sum("num_examples" ).alias("total_num_examples" ) , pyspark.sql.functions.sum("num_bytes" ).alias("total_num_bytes" ) , pyspark.sql.functions.count("num_bytes" ).alias("num_shards" ) , pyspark.sql.functions.collect_list("num_examples" ).alias("shard_lengths" ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def lowerCAmelCase__ ( self , a__ , a__ = "arrow" , a__ = None , a__ = None , **a__ , ) -> int: '''simple docstring''' self._validate_cache_dir() snake_case_ = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(a__ ) snake_case_ = not is_remote_filesystem(self._fs ) snake_case_ = os.path.join if is_local else posixpath.join snake_case_ = "-TTTTT-SSSSS-of-NNNNN" snake_case_ = F'{self.name}-{split_generator.name}{SUFFIX}.{file_format}' snake_case_ = path_join(self._output_dir , a__ ) snake_case_ = 0 snake_case_ = 0 snake_case_ = 0 snake_case_ = [] snake_case_ = [] for task_id, content in self._prepare_split_single(a__ , a__ , a__ ): ( ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ( snake_case_ ) , ) = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(a__ ) snake_case_ = total_num_examples snake_case_ = total_num_bytes # should rename everything at the end logger.debug(F'Renaming {total_shards} shards.' ) if total_shards > 1: snake_case_ = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. 
snake_case_ = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( a__ , a__ , a__ , ): rename( a__ , fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace("TTTTT-SSSSS" , F'{global_shard_id:05d}' ).replace("NNNNN" , F'{total_shards:05d}' ) , ) snake_case_ = [] snake_case_ = 0 for i in range(len(a__ ) ): snake_case_ , snake_case_ = task_id_and_num_shards[i] for shard_id in range(a__ ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(a__ , len(a__ ) ).map(lambda a__ : _rename_shard(*a__ ) ).collect() else: # don't use any pattern snake_case_ = 0 snake_case_ = task_id_and_num_shards[0][0] self._rename( fpath.replace("SSSSS" , F'{shard_id:05d}' ).replace("TTTTT" , F'{task_id:05d}' ) , fpath.replace(a__ , "" ) , ) def lowerCAmelCase__ ( self , a__ , ) -> SparkExamplesIterable: '''simple docstring''' return SparkExamplesIterable(self.df )
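# Usage sketch for the Spark-backed builder above (added for illustration, not
# part of the original module). `Dataset.from_spark` is the public entry point
# in recent `datasets` releases; the toy DataFrame here is a made-up example.
from datasets import Dataset
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([("hello", 0), ("world", 1)], schema="text string, label long")

# Materializes the DataFrame as an Arrow-backed dataset, writing shards through
# the shard-writing machinery shown above.
ds = Dataset.from_spark(df)
print(ds)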
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available

_import_structure = {
    "configuration_conditional_detr": [
        "CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ConditionalDetrConfig",
        "ConditionalDetrOnnxConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_conditional_detr"] = ["ConditionalDetrFeatureExtractor"]
    _import_structure["image_processing_conditional_detr"] = ["ConditionalDetrImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_conditional_detr"] = [
        "CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConditionalDetrForObjectDetection",
        "ConditionalDetrForSegmentation",
        "ConditionalDetrModel",
        "ConditionalDetrPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_conditional_detr import (
        CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ConditionalDetrConfig,
        ConditionalDetrOnnxConfig,
    )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
        from .image_processing_conditional_detr import ConditionalDetrImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_conditional_detr import (
            CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConditionalDetrForObjectDetection,
            ConditionalDetrForSegmentation,
            ConditionalDetrModel,
            ConditionalDetrPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
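# Illustration of what the lazy-import pattern above buys (a simplified toy
# stand-in written for this document, not the actual transformers `_LazyModule`):
# heavy submodules such as `modeling_conditional_detr` are only imported when
# one of their attributes is first accessed, so importing the package stays cheap.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Map each exported attribute back to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr: str):
        # Only called when normal lookup fails, i.e. on first access.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(submodule, attr)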
def or_gate(input_1: int, input_2: int) -> int:
    """Logical OR gate: returns 1 if at least one input is 1, else 0."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1


if __name__ == "__main__":
    print(or_gate(0, 1))
    print(or_gate(1, 0))
    print(or_gate(0, 0))
    print(or_gate(1, 1))
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu _SCREAMING_SNAKE_CASE : Union[str, Any] = False class _snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' return 12 @property def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' return 12 @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' return 32 @property def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' snake_case_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) return tokenizer @property def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(a__ ) @property def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' torch.manual_seed(0 ) snake_case_ = 12 snake_case_ = 12 snake_case_ = { "attention_bias": True, "cross_attention_dim": 32, "attention_head_dim": height * width, "num_attention_heads": 1, "num_vector_embeds": self.num_embed, "num_embeds_ada_norm": self.num_embeds_ada_norm, "norm_num_groups": 32, "sample_size": width, "activation_fn": "geglu-approximate", } snake_case_ = TransformeraDModel(**a__ ) return model def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ = "cpu" snake_case_ = self.dummy_vqvae snake_case_ = self.dummy_text_encoder snake_case_ = self.dummy_tokenizer snake_case_ = self.dummy_transformer snake_case_ = VQDiffusionScheduler(self.num_embed ) snake_case_ = LearnedClassifierFreeSamplingEmbeddings(learnable=a__ ) snake_case_ = VQDiffusionPipeline( vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , ) snake_case_ = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) snake_case_ = "teddy bear playing in the pool" snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" ) snake_case_ = output.images snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipe( [prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0] snake_case_ = image[0, -3:, -3:, -1] snake_case_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case_ = np.array([0.6_5_5_1, 0.6_1_6_8, 0.5_0_0_8, 0.5_6_7_6, 0.5_6_5_9, 0.4_2_9_5, 
0.6_0_7_3, 0.5_5_9_9, 0.4_9_9_2] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' snake_case_ = "cpu" snake_case_ = self.dummy_vqvae snake_case_ = self.dummy_text_encoder snake_case_ = self.dummy_tokenizer snake_case_ = self.dummy_transformer snake_case_ = VQDiffusionScheduler(self.num_embed ) snake_case_ = LearnedClassifierFreeSamplingEmbeddings( learnable=a__ , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) snake_case_ = VQDiffusionPipeline( vqvae=a__ , text_encoder=a__ , tokenizer=a__ , transformer=a__ , scheduler=a__ , learned_classifier_free_sampling_embeddings=a__ , ) snake_case_ = pipe.to(a__ ) pipe.set_progress_bar_config(disable=a__ ) snake_case_ = "teddy bear playing in the pool" snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipe([prompt] , generator=a__ , num_inference_steps=2 , output_type="np" ) snake_case_ = output.images snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipe( [prompt] , generator=a__ , output_type="np" , return_dict=a__ , num_inference_steps=2 )[0] snake_case_ = image[0, -3:, -3:, -1] snake_case_ = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) snake_case_ = np.array([0.6_6_9_3, 0.6_0_7_5, 0.4_9_5_9, 0.5_7_0_1, 0.5_5_8_3, 0.4_3_3_3, 0.6_1_7_1, 0.5_6_8_4, 0.4_9_8_8] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class _snake_case ( unittest.TestCase ): def lowerCAmelCase__ ( self ) -> Optional[int]: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy" ) snake_case_ = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq" ) snake_case_ = pipeline.to(a__ ) pipeline.set_progress_bar_config(disable=a__ ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though snake_case_ = torch.Generator(device=a__ ).manual_seed(0 ) snake_case_ = pipeline( "teddy bear playing in the pool" , num_images_per_prompt=1 , generator=a__ , output_type="np" , ) snake_case_ = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
import heapq import sys import numpy as np UpperCamelCase_ = tuple[int, int] class _snake_case : '''simple docstring''' def __init__( self: Optional[int] ) -> List[Any]: UpperCAmelCase_ : Optional[int] = [] UpperCAmelCase_ : Optional[Any] = set() def A__ ( self: Optional[int] ) -> Optional[int]: if not self.empty(): return self.elements[0][0] else: return float("""inf""" ) def A__ ( self: Tuple ) -> List[str]: return len(self.elements ) == 0 def A__ ( self: Tuple ,lowerCamelCase_: Union[str, Any] ,lowerCamelCase_: Optional[Any] ) -> List[str]: if item not in self.set: heapq.heappush(self.elements ,(priority, item) ) self.set.add(a__ ) else: # update # print("update", item) UpperCAmelCase_ : Union[str, Any] = [] ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[Any] = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements ,(pro, xxx) ) def A__ ( self: int ,lowerCamelCase_: Any ) -> int: if item in self.set: self.set.remove(a__ ) UpperCAmelCase_ : Optional[Any] = [] ((UpperCAmelCase_) , (UpperCAmelCase_)) : Dict = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) ((UpperCAmelCase_) , (UpperCAmelCase_)) : List[Any] = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements ,(prito, yyy) ) def A__ ( self: List[str] ) -> List[str]: return self.elements[0][1] def A__ ( self: List[str] ) -> Optional[int]: ((UpperCAmelCase_) , (UpperCAmelCase_)) : Union[str, Any] = heapq.heappop(self.elements ) self.set.remove(a__ ) return (priority, item) def lowerCamelCase_ ( _a : TPos , _a : TPos ): '''simple docstring''' UpperCAmelCase_ : List[str] = np.array(_a ) UpperCAmelCase_ : int = np.array(_a ) return np.linalg.norm(a - b ) def lowerCamelCase_ ( _a : TPos , _a : TPos ): '''simple docstring''' return consistent_heuristic(_a , _a ) // t def lowerCamelCase_ ( _a : TPos , _a : TPos ): '''simple docstring''' return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def lowerCamelCase_ ( _a : TPos , _a : int , _a : TPos , _a : dict[TPos, float] ): '''simple docstring''' UpperCAmelCase_ : List[Any] = g_function[start] + Wa * heuristics[i](_a , _a ) return ans def lowerCamelCase_ ( _a : List[Any] , _a : List[Any] , _a : int ): '''simple docstring''' UpperCAmelCase_ : Tuple = np.chararray((n, n) ) for i in range(_a ): for j in range(_a ): UpperCAmelCase_ : List[Any] = """*""" for i in range(_a ): for j in range(_a ): if (j, (n - 1) - i) in blocks: UpperCAmelCase_ : List[str] = """#""" UpperCAmelCase_ : List[Any] = """-""" UpperCAmelCase_ : Dict = back_pointer[goal] while x != start: ((UpperCAmelCase_) , (UpperCAmelCase_)) : int = x # print(x) UpperCAmelCase_ : Union[str, Any] = """-""" UpperCAmelCase_ : Dict = back_pointer[x] UpperCAmelCase_ : List[str] = """-""" for i in range(_a ): for j in range(_a ): if (i, j) == (0, n - 1): print(grid[i][j] , end=""" """ ) print("""<-- End position""" , end=""" """ ) else: print(grid[i][j] , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) print("""PATH TAKEN BY THE ALGORITHM IS:-""" ) UpperCAmelCase_ : Any = back_pointer[goal] while x != start: print(_a , end=""" """ ) UpperCAmelCase_ : int = back_pointer[x] print(_a ) sys.exit() def lowerCamelCase_ ( _a : TPos ): '''simple docstring''' if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > 
n - 1: return False return True def lowerCamelCase_ ( _a : List[Any] , _a : int , _a : Tuple , _a : int , _a : List[str] , _a : Tuple , _a : Dict , _a : Dict , ): '''simple docstring''' for itera in range(_a ): open_list[itera].remove_element(_a ) # print("s", s) # print("j", j) ((UpperCAmelCase_) , (UpperCAmelCase_)) : Optional[int] = s UpperCAmelCase_ : Union[str, Any] = (x - 1, y) UpperCAmelCase_ : Union[str, Any] = (x + 1, y) UpperCAmelCase_ : List[Any] = (x, y + 1) UpperCAmelCase_ : int = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(_a ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(_a ) UpperCAmelCase_ : Optional[int] = -1 UpperCAmelCase_ : str = float("""inf""" ) if valid(_a ) and g_function[neighbours] > g_function[s] + 1: UpperCAmelCase_ : Union[str, Any] = g_function[s] + 1 UpperCAmelCase_ : str = s if neighbours not in close_list_anchor: open_list[0].put(_a , key(_a , 0 , _a , _a ) ) if neighbours not in close_list_inad: for var in range(1 , _a ): if key(_a , _a , _a , _a ) <= Wa * key( _a , 0 , _a , _a ): open_list[j].put( _a , key(_a , _a , _a , _a ) ) def lowerCamelCase_ ( ): '''simple docstring''' UpperCAmelCase_ : str = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list UpperCamelCase_ = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} UpperCamelCase_ = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] UpperCamelCase_ = make_common_ground() UpperCamelCase_ = blocks_blk # hyper parameters UpperCamelCase_ = 1 UpperCamelCase_ = 1 UpperCamelCase_ = 20 UpperCamelCase_ = 3 # one consistent and two other inconsistent # start and end destination UpperCamelCase_ = (0, 0) UpperCamelCase_ = (n - 1, n - 1) UpperCamelCase_ = 1 def lowerCamelCase_ ( _a : TPos , _a : TPos , _a : int ): '''simple docstring''' UpperCAmelCase_ : List[Any] = {start: 0, goal: float("""inf""" )} UpperCAmelCase_ : int = {start: -1, goal: -1} UpperCAmelCase_ : int = [] UpperCAmelCase_ : str = set() for i in range(_a ): open_list.append(PriorityQueue() ) open_list[i].put(_a , key(_a , _a , _a , _a ) ) UpperCAmelCase_ : Tuple = [] UpperCAmelCase_ : Any = [] while open_list[0].minkey() < float("""inf""" ): for i in range(1 , _a ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float("""inf""" ): do_something(_a , _a , _a ) else: UpperCAmelCase_ , UpperCAmelCase_ : Optional[Any] = open_list[i].top_show() visited.add(_a ) expand_state( _a , _a , _a , _a , _a , _a , _a , _a , ) close_list_inad.append(_a ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float("""inf""" ): do_something(_a , _a , _a ) else: UpperCAmelCase_ : int = open_list[0].top_show() visited.add(_a ) expand_state( _a , 0 , _a , _a , _a , _a , _a , _a , ) close_list_anchor.append(_a ) print("""No path found to goal""" ) print() for i in range(n - 1 , -1 , -1 ): for j in range(_a ): if (j, i) in blocks: print("""#""" , end=""" """ ) elif (j, i) in 
back_pointer: if (j, i) == (n - 1, n - 1): print("""*""" , end=""" """ ) else: print("""-""" , end=""" """ ) else: print("""*""" , end=""" """ ) if (j, i) == (n - 1, n - 1): print("""<-- End position""" , end=""" """ ) print() print("""^""" ) print("""Start position""" ) print() print("""# is an obstacle""" ) print("""- is the path taken by algorithm""" ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data into the [0, 1] range (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit variance (z-score standardization)."""
    mu = mean(data)
    sigma = stdev(data)
    return [round((x - mu) / sigma, ndigits) for x in data]
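# Quick check of the two helpers above (the sample values are illustrative only,
# added for this document):
data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
print(normalization(data))    # every value rescaled into [0, 1]
print(standardization(data))  # zero mean, unit (sample) standard deviation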
'''simple docstring''' import shutil import tempfile import unittest from unittest.mock import patch from transformers import ( DefaultFlowCallback, IntervalStrategy, PrinterCallback, ProgressCallback, Trainer, TrainerCallback, TrainingArguments, is_torch_available, ) from transformers.testing_utils import require_torch if is_torch_available(): from transformers.trainer import DEFAULT_CALLBACKS from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel class a_ ( lowercase_ ): '''simple docstring''' def __init__( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = [] def snake_case_( self , A , A , A , **A ) -> List[str]: self.events.append("""on_init_end""" ) def snake_case_( self , A , A , A , **A ) -> List[str]: self.events.append("""on_train_begin""" ) def snake_case_( self , A , A , A , **A ) -> int: self.events.append("""on_train_end""" ) def snake_case_( self , A , A , A , **A ) -> Any: self.events.append("""on_epoch_begin""" ) def snake_case_( self , A , A , A , **A ) -> Union[str, Any]: self.events.append("""on_epoch_end""" ) def snake_case_( self , A , A , A , **A ) -> Union[str, Any]: self.events.append("""on_step_begin""" ) def snake_case_( self , A , A , A , **A ) -> Optional[Any]: self.events.append("""on_step_end""" ) def snake_case_( self , A , A , A , **A ) -> Optional[int]: self.events.append("""on_evaluate""" ) def snake_case_( self , A , A , A , **A ) -> Union[str, Any]: self.events.append("""on_predict""" ) def snake_case_( self , A , A , A , **A ) -> str: self.events.append("""on_save""" ) def snake_case_( self , A , A , A , **A ) -> Tuple: self.events.append("""on_log""" ) def snake_case_( self , A , A , A , **A ) -> str: self.events.append("""on_prediction_step""" ) @require_torch class a_ ( unittest.TestCase ): '''simple docstring''' def snake_case_( self ) -> Optional[int]: _SCREAMING_SNAKE_CASE = tempfile.mkdtemp() def snake_case_( self ) -> List[str]: shutil.rmtree(self.output_dir ) def snake_case_( self , A=0 , A=0 , A=64 , A=64 , A=None , A=False , **A ) -> int: _SCREAMING_SNAKE_CASE = RegressionDataset(length=a__ ) _SCREAMING_SNAKE_CASE = RegressionDataset(length=a__ ) _SCREAMING_SNAKE_CASE = RegressionModelConfig(a=a__ , b=a__ ) _SCREAMING_SNAKE_CASE = RegressionPreTrainedModel(a__ ) _SCREAMING_SNAKE_CASE = TrainingArguments(self.output_dir , disable_tqdm=a__ , report_to=[] , **a__ ) return Trainer( a__ , a__ , train_dataset=a__ , eval_dataset=a__ , callbacks=a__ , ) def snake_case_( self , A , A ) -> Optional[int]: self.assertEqual(len(a__ ) , len(a__ ) ) # Order doesn't matter _SCREAMING_SNAKE_CASE = sorted(a__ , key=lambda A : cb.__name__ if isinstance(a__ , a__ ) else cb.__class__.__name__ ) _SCREAMING_SNAKE_CASE = sorted(a__ , key=lambda A : cb.__name__ if isinstance(a__ , a__ ) else cb.__class__.__name__ ) for cba, cba in zip(a__ , a__ ): if isinstance(a__ , a__ ) and isinstance(a__ , a__ ): self.assertEqual(a__ , a__ ) elif isinstance(a__ , a__ ) and not isinstance(a__ , a__ ): self.assertEqual(a__ , cba.__class__ ) elif not isinstance(a__ , a__ ) and isinstance(a__ , a__ ): self.assertEqual(cba.__class__ , a__ ) else: self.assertEqual(a__ , a__ ) def snake_case_( self , A ) -> Union[str, Any]: _SCREAMING_SNAKE_CASE = ["""on_init_end""", """on_train_begin"""] _SCREAMING_SNAKE_CASE = 0 _SCREAMING_SNAKE_CASE = len(trainer.get_eval_dataloader() ) _SCREAMING_SNAKE_CASE = ["""on_prediction_step"""] * len(trainer.get_eval_dataloader() ) + ["""on_log""", """on_evaluate"""] for _ in range(trainer.state.num_train_epochs ): 
expected_events.append("""on_epoch_begin""" ) for _ in range(a__ ): step += 1 expected_events += ["on_step_begin", "on_step_end"] if step % trainer.args.logging_steps == 0: expected_events.append("""on_log""" ) if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0: expected_events += evaluation_events.copy() if step % trainer.args.save_steps == 0: expected_events.append("""on_save""" ) expected_events.append("""on_epoch_end""" ) if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH: expected_events += evaluation_events.copy() expected_events += ["on_log", "on_train_end"] return expected_events def snake_case_( self ) -> List[Any]: _SCREAMING_SNAKE_CASE = self.get_trainer() _SCREAMING_SNAKE_CASE = DEFAULT_CALLBACKS.copy() + [ProgressCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) # Callbacks passed at init are added to the default callbacks _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] ) expected_callbacks.append(a__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback _SCREAMING_SNAKE_CASE = self.get_trainer(disable_tqdm=a__ ) _SCREAMING_SNAKE_CASE = DEFAULT_CALLBACKS.copy() + [PrinterCallback] self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) def snake_case_( self ) -> str: _SCREAMING_SNAKE_CASE = DEFAULT_CALLBACKS.copy() + [ProgressCallback] _SCREAMING_SNAKE_CASE = self.get_trainer() # We can add, pop, or remove by class name trainer.remove_callback(a__ ) expected_callbacks.remove(a__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) _SCREAMING_SNAKE_CASE = self.get_trainer() _SCREAMING_SNAKE_CASE = trainer.pop_callback(a__ ) self.assertEqual(cb.__class__ , a__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) trainer.add_callback(a__ ) expected_callbacks.insert(0 , a__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) # We can also add, pop, or remove by instance _SCREAMING_SNAKE_CASE = self.get_trainer() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[0] trainer.remove_callback(a__ ) expected_callbacks.remove(a__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) _SCREAMING_SNAKE_CASE = self.get_trainer() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[0] _SCREAMING_SNAKE_CASE = trainer.pop_callback(a__ ) self.assertEqual(a__ , a__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) trainer.add_callback(a__ ) expected_callbacks.insert(0 , a__ ) self.check_callbacks_equality(trainer.callback_handler.callbacks , a__ ) def snake_case_( self ) -> Optional[Any]: import warnings # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested warnings.simplefilter(action="""ignore""" , category=a__ ) _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(a__ , self.get_expected_events(a__ ) ) # Independent log/save/eval _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(a__ , self.get_expected_events(a__ ) ) _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 ) 
trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(a__ , self.get_expected_events(a__ ) ) _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy="""steps""" ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(a__ , self.get_expected_events(a__ ) ) _SCREAMING_SNAKE_CASE = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy="""epoch""" ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(a__ , self.get_expected_events(a__ ) ) # A bit of everything _SCREAMING_SNAKE_CASE = self.get_trainer( callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy="""steps""" , ) trainer.train() _SCREAMING_SNAKE_CASE = trainer.callback_handler.callbacks[-2].events self.assertEqual(a__ , self.get_expected_events(a__ ) ) # warning should be emitted for duplicated callbacks with patch("""transformers.trainer_callback.logger.warning""" ) as warn_mock: _SCREAMING_SNAKE_CASE = self.get_trainer( callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , ) assert str(a__ ) in warn_mock.call_args[0][0]
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {"configuration_xglm": ["XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XGLMConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm"] = ["XGLMTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xglm_fast"] = ["XGLMTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xglm"] = [
        "XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XGLMForCausalLM",
        "XGLMModel",
        "XGLMPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xglm"] = [
        "FlaxXGLMForCausalLM",
        "FlaxXGLMModel",
        "FlaxXGLMPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xglm"] = [
        "TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXGLMForCausalLM",
        "TFXGLMModel",
        "TFXGLMPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm import XGLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xglm_fast import XGLMTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xglm import (
            TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXGLMForCausalLM,
            TFXGLMModel,
            TFXGLMPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import unittest from parameterized import parameterized from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXModel, ) class _a : def __init__(self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=13, SCREAMING_SNAKE_CASE_=7, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=99, SCREAMING_SNAKE_CASE_=64, SCREAMING_SNAKE_CASE_=5, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=37, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=512, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_=0.0_2, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=4, SCREAMING_SNAKE_CASE_=None, ) -> List[Any]: UpperCAmelCase_: Optional[Any] = parent UpperCAmelCase_: Optional[Any] = batch_size UpperCAmelCase_: List[Any] = seq_length UpperCAmelCase_: List[Any] = is_training UpperCAmelCase_: str = use_input_mask UpperCAmelCase_: List[str] = use_token_type_ids UpperCAmelCase_: Optional[int] = use_labels UpperCAmelCase_: Union[str, Any] = vocab_size UpperCAmelCase_: Optional[int] = hidden_size UpperCAmelCase_: Any = num_hidden_layers UpperCAmelCase_: List[Any] = num_attention_heads UpperCAmelCase_: List[str] = intermediate_size UpperCAmelCase_: Any = hidden_act UpperCAmelCase_: List[str] = hidden_dropout_prob UpperCAmelCase_: Optional[int] = attention_probs_dropout_prob UpperCAmelCase_: List[Any] = max_position_embeddings UpperCAmelCase_: Optional[Any] = type_vocab_size UpperCAmelCase_: Optional[Any] = type_sequence_label_size UpperCAmelCase_: Any = initializer_range UpperCAmelCase_: List[Any] = num_labels UpperCAmelCase_: str = num_choices UpperCAmelCase_: Tuple = scope UpperCAmelCase_: str = vocab_size - 1 def __snake_case (self ) -> Any: UpperCAmelCase_: int = ids_tensor([self.batch_size, self.seq_length], self.vocab_size ) UpperCAmelCase_: int = None if self.use_input_mask: UpperCAmelCase_: List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_: List[Any] = None if self.use_labels: UpperCAmelCase_: List[str] = ids_tensor([self.batch_size, self.seq_length], self.num_labels ) UpperCAmelCase_: List[str] = self.get_config() return config, input_ids, input_mask, token_labels def __snake_case (self ) -> Any: return GPTNeoXConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=a__, initializer_range=self.initializer_range, pad_token_id=self.pad_token_id, ) def __snake_case (self ) -> List[Any]: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: str = self.prepare_config_and_inputs() UpperCAmelCase_: Any = True 
return config, input_ids, input_mask, token_labels def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> str: UpperCAmelCase_: Tuple = GPTNeoXModel(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase_: List[Any] = model(a__, attention_mask=a__ ) UpperCAmelCase_: Union[str, Any] = model(a__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCAmelCase_: List[str] = True UpperCAmelCase_: Optional[Any] = GPTNeoXModel(a__ ) model.to(a__ ) model.eval() UpperCAmelCase_: List[str] = model(a__, attention_mask=a__ ) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> int: UpperCAmelCase_: int = GPTNeoXForCausalLM(config=a__ ) model.to(a__ ) model.eval() UpperCAmelCase_: Dict = model(a__, attention_mask=a__, labels=a__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[str]: UpperCAmelCase_: Union[str, Any] = self.num_labels UpperCAmelCase_: Dict = GPTNeoXForQuestionAnswering(a__ ) model.to(a__ ) model.eval() UpperCAmelCase_: Optional[Any] = model(a__, attention_mask=a__ ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Dict: UpperCAmelCase_: Dict = self.num_labels UpperCAmelCase_: List[str] = GPTNeoXForSequenceClassification(a__ ) model.to(a__ ) model.eval() UpperCAmelCase_: Optional[int] = ids_tensor([self.batch_size], self.type_sequence_label_size ) UpperCAmelCase_: Optional[Any] = model(a__, attention_mask=a__, labels=a__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCAmelCase_: List[str] = self.num_labels UpperCAmelCase_: Optional[Any] = GPTNeoXForTokenClassification(a__ ) model.to(a__ ) model.eval() UpperCAmelCase_: int = model(a__, attention_mask=a__, labels=a__ ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) ) def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]: UpperCAmelCase_: Optional[int] = True UpperCAmelCase_: Optional[int] = GPTNeoXForCausalLM(config=a__ ) model.to(a__ ) model.eval() # first forward pass UpperCAmelCase_: List[str] = model(a__, attention_mask=a__, use_cache=a__ ) UpperCAmelCase_: Tuple = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids UpperCAmelCase_: Dict = ids_tensor((self.batch_size, 3), config.vocab_size ) UpperCAmelCase_: Dict = ids_tensor((self.batch_size, 3), vocab_size=2 ) # append to next input_ids and UpperCAmelCase_: Dict = torch.cat([input_ids, next_tokens], dim=-1 ) UpperCAmelCase_: str = torch.cat([input_mask, next_mask], dim=-1 ) UpperCAmelCase_: List[Any] = model(a__, attention_mask=a__, output_hidden_states=a__ ) 
UpperCAmelCase_: Optional[int] = output_from_no_past["""hidden_states"""][0] UpperCAmelCase_: Optional[int] = model( a__, attention_mask=a__, past_key_values=a__, output_hidden_states=a__, )["""hidden_states"""][0] # select random slice UpperCAmelCase_: Optional[int] = ids_tensor((1,), output_from_past.shape[-1] ).item() UpperCAmelCase_: Tuple = output_from_no_past[:, -3:, random_slice_idx].detach() UpperCAmelCase_: str = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(a__, a__, atol=1E-3 ) ) def __snake_case (self ) -> Optional[int]: UpperCAmelCase_: List[Any] = self.prepare_config_and_inputs() UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[str] = config_and_inputs UpperCAmelCase_: Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _a ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): A = ( ( GPTNeoXModel, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, ) if is_torch_available() else () ) A = (GPTNeoXForCausalLM,) if is_torch_available() else () A = ( { "feature-extraction": GPTNeoXModel, "question-answering": GPTNeoXForQuestionAnswering, "text-classification": GPTNeoXForSequenceClassification, "text-generation": GPTNeoXForCausalLM, "token-classification": GPTNeoXForTokenClassification, "zero-shot": GPTNeoXForSequenceClassification, } if is_torch_available() else {} ) A = False A = False A = False A = False def __snake_case (self ) -> str: UpperCAmelCase_: str = GPTNeoXModelTester(self ) UpperCAmelCase_: Dict = ConfigTester(self, config_class=a__, hidden_size=64, num_attention_heads=8 ) def __snake_case (self ) -> List[Any]: self.config_tester.run_common_tests() def __snake_case (self ) -> List[str]: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(a__, a__, a__ ) def __snake_case (self ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(a__, a__, a__ ) def __snake_case (self ) -> Dict: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder() UpperCAmelCase_: Optional[Any] = None self.model_tester.create_and_check_model_as_decoder(a__, a__, a__ ) def __snake_case (self ) -> Tuple: UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(a__, a__, a__ ) def __snake_case (self ) -> str: UpperCAmelCase_: Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_causal_lm(*a__ ) def __snake_case (self ) -> Optional[Any]: UpperCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*a__ ) def __snake_case (self ) -> List[Any]: UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*a__ ) def __snake_case (self ) -> List[str]: UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*a__ ) @unittest.skip(reason="""Feed forward chunking is not implemented""" ) def __snake_case (self ) -> int: pass @parameterized.expand([("""linear""",), ("""dynamic""",)] ) def __snake_case (self, SCREAMING_SNAKE_CASE_ ) -> Optional[Any]: UpperCAmelCase_ , UpperCAmelCase_: List[str] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase_: List[Any] = ids_tensor([1, 10], config.vocab_size ) UpperCAmelCase_: Any = ids_tensor([1, int(config.max_position_embeddings * 1.5 )], config.vocab_size ) set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase_: int = GPTNeoXModel(a__ ) original_model.to(a__ ) original_model.eval() UpperCAmelCase_: Any = original_model(a__ ).last_hidden_state UpperCAmelCase_: str = original_model(a__ ).last_hidden_state set_seed(42 ) # Fixed seed at init time so the two models get the same random weights UpperCAmelCase_: Any = {"""type""": scaling_type, """factor""": 10.0} UpperCAmelCase_: Optional[int] = GPTNeoXModel(a__ ) scaled_model.to(a__ ) scaled_model.eval() UpperCAmelCase_: List[str] = scaled_model(a__ ).last_hidden_state UpperCAmelCase_: Dict = scaled_model(a__ ).last_hidden_state # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original # maximum sequence length, so the outputs for the short input should match. if scaling_type == "dynamic": self.assertTrue(torch.allclose(a__, a__, atol=1E-5 ) ) else: self.assertFalse(torch.allclose(a__, a__, atol=1E-5 ) ) # The output should be different for long inputs self.assertFalse(torch.allclose(a__, a__, atol=1E-5 ) ) @require_torch class _a ( unittest.TestCase ): @slow def __snake_case (self ) -> Dict: UpperCAmelCase_: Any = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) for checkpointing in [True, False]: UpperCAmelCase_: int = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" ) if checkpointing: model.gradient_checkpointing_enable() else: model.gradient_checkpointing_disable() model.to(a__ ) UpperCAmelCase_: List[Any] = tokenizer("""My favorite food is""", return_tensors="""pt""" ).to(a__ ) # The hub repo. is updated on 2023-04-04, resulting in poor outputs. # See: https://github.com/huggingface/transformers/pull/24193 UpperCAmelCase_: List[Any] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure""" UpperCAmelCase_: str = model.generate(**a__, do_sample=a__, max_new_tokens=20 ) UpperCAmelCase_: List[str] = tokenizer.batch_decode(a__ )[0] self.assertEqual(a__, a__ )
def binomial_coefficient(n: int, r: int) -> int:
    """Compute nCr iteratively with Pascal's rule, using O(r) extra space."""
    c = [0 for _ in range(r + 1)]
    # nC0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # Compute the current row from the previous row, right to left.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
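# Sanity check for the routine above against the standard library (added for
# this document; `math.comb` requires Python 3.8+):
import math

for n in range(8):
    row = [binomial_coefficient(n, r) for r in range(n + 1)]
    assert row == [math.comb(n, r) for r in range(n + 1)]
    print(row)  # rows of Pascal's triangle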
from heapq import heappop, heappush

import numpy as np


def dijkstra(
    grid: np.ndarray,
    source: tuple[int, int],
    destination: tuple[int, int],
    allow_diagonal: bool,
) -> tuple[float, list[tuple[int, int]]]:
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]

    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols), np.inf)
    matrix[source] = 0
    predecessors = np.empty((rows, cols), dtype=object)
    predecessors[source] = None

    while queue:
        (dist, (x, y)) = heappop(queue)
        if (x, y) in visited:
            continue
        visited.add((x, y))

        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y))
                x, y = predecessors[x, y]
            path.append(source)  # add the source manually
            path.reverse()
            return matrix[destination], path

        for i in range(len(dx)):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue, (dist + 1, (nx, ny)))
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)

    return np.inf, []


if __name__ == "__main__":
    import doctest

    doctest.testmod()
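# Usage sketch for the grid search above (added for illustration; the grid and
# endpoints are made up). A cell value of 1 is walkable, 0 is blocked, and
# every step costs 1.
import numpy as np

grid = np.array(
    [
        [1, 1, 1],
        [0, 0, 1],
        [1, 1, 1],
    ]
)
dist, path = dijkstra(grid, source=(0, 0), destination=(2, 0), allow_diagonal=False)
print(dist)  # 6.0 -- the direct column is blocked, so the path detours right
print(path)  # [(0, 0), (0, 1), (0, 2), (1, 2), (2, 2), (2, 1), (2, 0)]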
"""simple docstring""" from __future__ import annotations from pprint import pformat from typing import Generic, TypeVar UpperCAmelCase_ : List[str] = TypeVar("""T""") class lowerCAmelCase__ ( Generic[T] ): '''simple docstring''' def __init__( self : Any , lowercase_ : Any = True): '''simple docstring''' SCREAMING_SNAKE_CASE_ : Union[str, Any] = {} # dictionary of lists SCREAMING_SNAKE_CASE_ : Optional[int] = directed def _SCREAMING_SNAKE_CASE ( self : Tuple , lowercase_ : Union[str, Any] , lowercase_ : List[Any]): '''simple docstring''' if not self.directed: # For undirected graphs # if both source vertex and destination vertex are both present in the # adjacency list, add destination vertex to source vertex list of adjacent # vertices and add source vertex to destination vertex list of adjacent # vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(a__) self.adj_list[destination_vertex].append(a__) # if only source vertex is present in adjacency list, add destination vertex # to source vertex list of adjacent vertices, then create a new vertex with # destination vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(a__) SCREAMING_SNAKE_CASE_ : List[Any] = [source_vertex] # if only destination vertex is present in adjacency list, add source vertex # to destination vertex list of adjacent vertices, then create a new vertex # with source vertex as key and assign a list containing the source vertex # as it's first adjacent vertex. elif destination_vertex in self.adj_list: self.adj_list[destination_vertex].append(a__) SCREAMING_SNAKE_CASE_ : Any = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and assign a list # containing the destination vertex as it's first adjacent vertex also # create a new vertex with destination vertex as key and assign a list # containing the source vertex as it's first adjacent vertex. else: SCREAMING_SNAKE_CASE_ : Optional[Any] = [destination_vertex] SCREAMING_SNAKE_CASE_ : List[Any] = [source_vertex] else: # For directed graphs # if both source vertex and destination vertex are present in adjacency # list, add destination vertex to source vertex list of adjacent vertices. if source_vertex in self.adj_list and destination_vertex in self.adj_list: self.adj_list[source_vertex].append(a__) # if only source vertex is present in adjacency list, add destination # vertex to source vertex list of adjacent vertices and create a new vertex # with destination vertex as key, which has no adjacent vertex elif source_vertex in self.adj_list: self.adj_list[source_vertex].append(a__) SCREAMING_SNAKE_CASE_ : List[str] = [] # if only destination vertex is present in adjacency list, create a new # vertex with source vertex as key and assign a list containing destination # vertex as first adjacent vertex elif destination_vertex in self.adj_list: SCREAMING_SNAKE_CASE_ : Tuple = [destination_vertex] # if both source vertex and destination vertex are not present in adjacency # list, create a new vertex with source vertex as key and a list containing # destination vertex as it's first adjacent vertex. 
Then create a new vertex # with destination vertex as key, which has no adjacent vertex else: SCREAMING_SNAKE_CASE_ : Dict = [destination_vertex] SCREAMING_SNAKE_CASE_ : int = [] return self def __repr__( self : Tuple): '''simple docstring''' return pformat(self.adj_list)
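# Usage sketch for the class above (added for this document):
undirected: GraphAdjacencyList[str] = GraphAdjacencyList(directed=False)
undirected.add_edge("a", "b").add_edge("b", "c")  # add_edge returns self, so calls chain
print(undirected)  # {'a': ['b'], 'b': ['a', 'c'], 'c': ['b']}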
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline

if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
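# Hedged sketch of loading the converted pipeline back (added for this document;
# the paths and input image are placeholders, not part of the original script):
from diffusers import UnCLIPImageVariationPipeline
from PIL import Image

pipe = UnCLIPImageVariationPipeline.from_pretrained("./karlo-image-variations")  # hypothetical dump_path
source_image = Image.open("input.png")  # placeholder input image
variation = pipe(image=source_image).images[0]
variation.save("variation.png")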
from __future__ import annotations

import math
from collections.abc import Callable


def line_length(
    fnc: Callable[[float], float],
    x_start: float,
    x_end: float,
    steps: int = 100,
) -> float:
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length


if __name__ == "__main__":

    def f(x: float) -> float:
        return math.sin(10 * x)

    print("f(x) = sin(10 * x)")
    print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
    while i <= 100_000:
        print(f"With {i} steps: {line_length(f, -10, 10, i)}")
        i *= 10
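# Convergence check (added for this document): for the straight line y = x on
# [0, 1], every chord lies on the curve, so the approximation should match the
# exact arc length sqrt(2) up to floating-point error.
import math

approx = line_length(lambda x: x, 0, 1, steps=1_000)
assert abs(approx - math.sqrt(2)) < 1e-9
print(approx)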
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING

logger = logging.get_logger(__name__)


class UperNetConfig(PretrainedConfig):
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,
        hidden_size=512,
        initializer_range=0.02,
        pool_scales=[1, 2, 3, 6],
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=384,
        auxiliary_channels=256,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"])
        elif isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.get("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = pool_scales
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
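# Minimal usage sketch, assuming the public transformers API (added for this
# document; the model is randomly initialized, so no weights are downloaded):
from transformers import UperNetConfig, UperNetForSemanticSegmentation

config = UperNetConfig()  # defaults to a ResNet backbone with hidden_size=512
model = UperNetForSemanticSegmentation(config)
print(model.config.backbone_config.model_type)  # "resnet"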
from ...configuration_utils import PretrainedConfig
from ...utils import logging

logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
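# Minimal usage sketch, assuming the public transformers API (added for this
# document; the model is randomly initialized, so no weights are downloaded):
from transformers import TimesformerConfig, TimesformerModel

config = TimesformerConfig(num_frames=16)  # override the default of 8 frames
model = TimesformerModel(config)
print(config.num_frames, config.attention_type)  # 16 divided_space_time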
'''simple docstring'''
import argparse
import json
from typing import List

from ltp import LTP

from transformers.models.bert.tokenization_bert import BertTokenizer


def _is_chinese_char(cp):
    """Checks whether cp is the codepoint of a CJK character."""
    if (
        (cp >= 0x4E00 and cp <= 0x9FFF)
        or (cp >= 0x3400 and cp <= 0x4DBF)  #
        or (cp >= 0x20000 and cp <= 0x2A6DF)  #
        or (cp >= 0x2A700 and cp <= 0x2B73F)  #
        or (cp >= 0x2B740 and cp <= 0x2B81F)  #
        or (cp >= 0x2B820 and cp <= 0x2CEAF)  #
        or (cp >= 0xF900 and cp <= 0xFAFF)
        or (cp >= 0x2F800 and cp <= 0x2FA1F)  #
    ):  #
        return True
    return False


def is_chinese(word: str):
    # Returns 1 only if every character of the word is a CJK character.
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1


def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list


def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            length = min(end - start, max_word_len)
            for i in range(length, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word


def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["cws"]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)

    return ref_ids


def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
    parser.add_argument(
        "--file_name",
        required=False,
        type=str,
        default="./resources/chinese-demo.txt",
        help="file need process, same as training data in lm",
    )
    parser.add_argument(
        "--ltp",
        required=False,
        type=str,
        default="./resources/ltp",
        help="resources for LTP tokenizer, usually a path",
    )
    parser.add_argument(
        "--bert",
        required=False,
        type=str,
        default="./resources/robert",
        help="resources for Bert tokenizer",
    )
    parser.add_argument(
        "--save_path",
        required=False,
        type=str,
        default="./resources/ref.txt",
        help="path to save res",
    )

    args = parser.parse_args()
    main(args)
85
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class __snake_case : @staticmethod def lowerCamelCase_ ( *lowercase , **lowercase) -> List[str]: '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class __snake_case ( unittest.TestCase ): a__ = MODEL_FOR_OBJECT_DETECTION_MAPPING def lowerCamelCase_ ( self , lowercase , lowercase , lowercase) -> Tuple: '''simple docstring''' a__: Optional[int] = ObjectDetectionPipeline(model=a__ , image_processor=a__) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def lowerCamelCase_ ( self , lowercase , lowercase) -> List[Any]: '''simple docstring''' a__: Dict = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0) self.assertGreater(len(a__) , 0) for detected_object in outputs: self.assertEqual( a__ , { 'score': ANY(a__), 'label': ANY(a__), 'box': {'xmin': ANY(a__), 'ymin': ANY(a__), 'xmax': ANY(a__), 'ymax': ANY(a__)}, } , ) import datasets a__: Union[str, Any] = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test') a__: Union[str, Any] = [ Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png'), 'http://images.cocodataset.org/val2017/000000039769.jpg', # RGBA dataset[0]['file'], # LA dataset[1]['file'], # L dataset[2]['file'], ] a__: Dict = object_detector(a__ , threshold=0.0) self.assertEqual(len(a__) , len(a__)) for outputs in batch_outputs: self.assertGreater(len(a__) , 0) for detected_object in outputs: self.assertEqual( a__ , { 'score': ANY(a__), 'label': ANY(a__), 'box': {'xmin': ANY(a__), 'ymin': ANY(a__), 'xmax': ANY(a__), 'ymax': ANY(a__)}, } , ) @require_tf @unittest.skip('Object detection not implemented in TF') def lowerCamelCase_ ( self) -> List[Any]: '''simple docstring''' pass @require_torch def lowerCamelCase_ ( self) -> List[str]: '''simple docstring''' a__: List[str] = 'hf-internal-testing/tiny-detr-mobilenetsv3' a__: Any = AutoModelForObjectDetection.from_pretrained(a__) a__: Union[str, Any] = AutoFeatureExtractor.from_pretrained(a__) a__: Union[str, Any] = ObjectDetectionPipeline(model=a__ , feature_extractor=a__) a__: List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0) self.assertEqual( nested_simplify(a__ , decimals=4) , [ {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ] , ) a__: Any = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ] , threshold=0.0 , ) self.assertEqual( nested_simplify(a__ , decimals=4) , [ [ {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ], [ {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, {'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 
1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}}, ], ] , ) @require_torch @slow def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' a__: int = 'facebook/detr-resnet-50' a__: str = AutoModelForObjectDetection.from_pretrained(a__) a__: Dict = AutoFeatureExtractor.from_pretrained(a__) a__: Optional[Any] = ObjectDetectionPipeline(model=a__ , feature_extractor=a__) a__: str = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg') self.assertEqual( nested_simplify(a__ , decimals=4) , [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) a__: Any = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ]) self.assertEqual( nested_simplify(a__ , decimals=4) , [ [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], ] , ) @require_torch @slow def lowerCamelCase_ ( self) -> Optional[Any]: '''simple docstring''' a__: int = 'facebook/detr-resnet-50' a__: int = pipeline('object-detection' , model=a__) a__: Union[str, Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg') self.assertEqual( nested_simplify(a__ , decimals=4) , [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) a__: Optional[Any] = object_detector( [ 'http://images.cocodataset.org/val2017/000000039769.jpg', 'http://images.cocodataset.org/val2017/000000039769.jpg', ]) self.assertEqual( nested_simplify(a__ , decimals=4) , [ [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, 
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], [ {'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}}, {'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}}, {'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}}, {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ], ] , ) @require_torch @slow def lowerCamelCase_ ( self) -> Union[str, Any]: '''simple docstring''' a__: Tuple = 0.9985 a__: List[str] = 'facebook/detr-resnet-50' a__: Union[str, Any] = pipeline('object-detection' , model=a__) a__: Tuple = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=a__) self.assertEqual( nested_simplify(a__ , decimals=4) , [ {'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}}, {'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}}, ] , ) @require_torch @require_pytesseract @slow def lowerCamelCase_ ( self) -> Tuple: '''simple docstring''' a__: Optional[int] = 'Narsil/layoutlmv3-finetuned-funsd' a__: str = 0.9993 a__: str = pipeline('object-detection' , model=a__ , threshold=a__) a__: List[str] = object_detector( 'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png') self.assertEqual( nested_simplify(a__ , decimals=4) , [ {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}}, {'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}}, ] , )
290
'''simple docstring'''
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable):
    # Decorator that flags a callable as experimental API.
    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            (f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future."),
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
85
0
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"


class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
99
'''simple docstring'''
from __future__ import annotations

import requests


def get_hackernews_story(story_id: str) -> dict:
    url = f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"
    return requests.get(url).json()


def hackernews_top_stories(max_stories: int = 10) -> list[dict]:
    # Fetch the ids of the current top stories, then resolve each one.
    url = "https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"
    story_ids = requests.get(url).json()[:max_stories]
    return [get_hackernews_story(story_id) for story_id in story_ids]


def hackernews_top_stories_as_markdown(max_stories: int = 10) -> str:
    stories = hackernews_top_stories(max_stories)
    return "\n".join("* [{title}]({url})".format(**story) for story in stories)


if __name__ == "__main__":
    print(hackernews_top_stories_as_markdown())
85
0
def decimal_to_binary(num):
    # Reject non-integer inputs explicitly, mirroring int()'s error messages.
    if isinstance(num, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if isinstance(num, str):
        raise TypeError("'str' object cannot be interpreted as an integer")

    if num == 0:
        return "0b0"

    negative = False

    if num < 0:
        negative = True
        num = -num

    binary = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1

    if negative:
        return "-0b" + "".join(str(e) for e in binary)

    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
186
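A few spot checks for the integer-to-binary helper above (assumes decimal_to_binary as defined there):

# Positive, zero, and negative inputs produce the expected prefixes.
assert decimal_to_binary(0) == "0b0"
assert decimal_to_binary(10) == "0b1010"   # 10 = 8 + 2
assert decimal_to_binary(-5) == "-0b101"
try:
    decimal_to_binary(3.5)                 # non-integers are rejected
except TypeError as err:
    print(err)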
'''simple docstring''' import unittest from transformers import ( MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING, TextGenerationPipeline, logging, pipeline, ) from transformers.testing_utils import ( CaptureLogger, is_pipeline_test, require_accelerate, require_tf, require_torch, require_torch_gpu, require_torch_or_tf, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf class _snake_case ( unittest.TestCase ): lowerCAmelCase_ : Optional[Any] = MODEL_FOR_CAUSAL_LM_MAPPING lowerCAmelCase_ : Optional[Any] = TF_MODEL_FOR_CAUSAL_LM_MAPPING @require_torch def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="pt" ) # Using `do_sample=False` to force deterministic output snake_case_ = text_generator("This is a test" , do_sample=a__ ) self.assertEqual( a__ , [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ] , ) snake_case_ = text_generator(["This is a test", "This is a second test"] ) self.assertEqual( a__ , [ [ { "generated_text": ( "This is a test ☃ ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy oscope." " oscope. FiliFili@@" ) } ], [ { "generated_text": ( "This is a second test ☃ segmental segmental segmental 议议eski eski flutter flutter Lacy" " oscope. oscope. FiliFili@@" ) } ], ] , ) snake_case_ = text_generator("This is a test" , do_sample=a__ , num_return_sequences=2 , return_tensors=a__ ) self.assertEqual( a__ , [ {"generated_token_ids": ANY(a__ )}, {"generated_token_ids": ANY(a__ )}, ] , ) snake_case_ = text_generator.model.config.eos_token_id snake_case_ = "<pad>" snake_case_ = text_generator( ["This is a test", "This is a second test"] , do_sample=a__ , num_return_sequences=2 , batch_size=2 , return_tensors=a__ , ) self.assertEqual( a__ , [ [ {"generated_token_ids": ANY(a__ )}, {"generated_token_ids": ANY(a__ )}, ], [ {"generated_token_ids": ANY(a__ )}, {"generated_token_ids": ANY(a__ )}, ], ] , ) @require_tf def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ = pipeline(task="text-generation" , model="sshleifer/tiny-ctrl" , framework="tf" ) # Using `do_sample=False` to force deterministic output snake_case_ = text_generator("This is a test" , do_sample=a__ ) self.assertEqual( a__ , [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ] , ) snake_case_ = text_generator(["This is a test", "This is a second test"] , do_sample=a__ ) self.assertEqual( a__ , [ [ { "generated_text": ( "This is a test FeyFeyFey(Croatis.), s.), Cannes Cannes Cannes 閲閲Cannes Cannes Cannes 攵" " please," ) } ], [ { "generated_text": ( "This is a second test Chieftain Chieftain prefecture prefecture prefecture Cannes Cannes" " Cannes 閲閲Cannes Cannes Cannes 攵 please," ) } ], ] , ) def lowerCAmelCase__ ( self , a__ , a__ , a__ ) -> str: '''simple docstring''' snake_case_ = TextGenerationPipeline(model=a__ , tokenizer=a__ ) return text_generator, ["This is a test", "Another test"] def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' snake_case_ = "Hello I believe in" snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" ) snake_case_ = text_generator(a__ ) self.assertEqual( a__ , [{"generated_text": "Hello I believe in fe fe fe fe fe fe fe fe fe fe fe fe"}] , ) snake_case_ = text_generator(a__ , stop_sequence=" fe" ) 
self.assertEqual(a__ , [{"generated_text": "Hello I believe in fe"}] ) def lowerCAmelCase__ ( self , a__ , a__ ) -> Tuple: '''simple docstring''' snake_case_ = text_generator.model snake_case_ = text_generator.tokenizer snake_case_ = text_generator("This is a test" ) self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] ) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) ) snake_case_ = text_generator("This is a test" , return_full_text=a__ ) self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] ) self.assertNotIn("This is a test" , outputs[0]["generated_text"] ) snake_case_ = pipeline(task="text-generation" , model=a__ , tokenizer=a__ , return_full_text=a__ ) snake_case_ = text_generator("This is a test" ) self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] ) self.assertNotIn("This is a test" , outputs[0]["generated_text"] ) snake_case_ = text_generator("This is a test" , return_full_text=a__ ) self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] ) self.assertTrue(outputs[0]["generated_text"].startswith("This is a test" ) ) snake_case_ = text_generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=a__ ) self.assertEqual( a__ , [ [{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}], [{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}], ] , ) if text_generator.tokenizer.pad_token is not None: snake_case_ = text_generator( ["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=a__ ) self.assertEqual( a__ , [ [{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}], [{"generated_text": ANY(a__ )}, {"generated_text": ANY(a__ )}], ] , ) with self.assertRaises(a__ ): snake_case_ = text_generator("test" , return_full_text=a__ , return_text=a__ ) with self.assertRaises(a__ ): snake_case_ = text_generator("test" , return_full_text=a__ , return_tensors=a__ ) with self.assertRaises(a__ ): snake_case_ = text_generator("test" , return_text=a__ , return_tensors=a__ ) # Empty prompt is slighly special # it requires BOS token to exist. # Special case for Pegasus which will always append EOS so will # work even without BOS. if ( text_generator.tokenizer.bos_token_id is not None or "Pegasus" in tokenizer.__class__.__name__ or "Git" in model.__class__.__name__ ): snake_case_ = text_generator("" ) self.assertEqual(a__ , [{"generated_text": ANY(a__ )}] ) else: with self.assertRaises((ValueError, AssertionError) ): snake_case_ = text_generator("" ) if text_generator.framework == "tf": # TF generation does not support max_new_tokens, and it's impossible # to control long generation with only max_length without # fancy calculation, dismissing tests for now. return # We don't care about infinite range models. # They already work. # Skip this test for XGLM, since it uses sinusoidal positional embeddings which are resized on-the-fly. 
snake_case_ = ["RwkvForCausalLM", "XGLMForCausalLM", "GPTNeoXForCausalLM"] if ( tokenizer.model_max_length < 10_000 and text_generator.model.__class__.__name__ not in EXTRA_MODELS_CAN_HANDLE_LONG_INPUTS ): # Handling of large generations with self.assertRaises((RuntimeError, IndexError, ValueError, AssertionError) ): text_generator("This is a test" * 500 , max_new_tokens=20 ) snake_case_ = text_generator("This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=20 ) # Hole strategy cannot work with self.assertRaises(a__ ): text_generator( "This is a test" * 500 , handle_long_generation="hole" , max_new_tokens=tokenizer.model_max_length + 10 , ) @require_torch @require_accelerate @require_torch_gpu def lowerCAmelCase__ ( self ) -> Optional[Any]: '''simple docstring''' import torch # Classic `model_kwargs` snake_case_ = pipeline( model="hf-internal-testing/tiny-random-bloom" , model_kwargs={"device_map": "auto", "torch_dtype": torch.bfloataa} , ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) snake_case_ = pipe("This is a test" ) self.assertEqual( a__ , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) # Upgraded those two to real pipeline arguments (they just get sent for the model as they're unlikely to mean anything else.) snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.bfloataa ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.bfloataa ) snake_case_ = pipe("This is a test" ) self.assertEqual( a__ , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) # torch_dtype will be automatically set to float32 if not provided - check: https://github.com/huggingface/transformers/pull/20602 snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" ) self.assertEqual(pipe.model.device , torch.device(0 ) ) self.assertEqual(pipe.model.lm_head.weight.dtype , torch.floataa ) snake_case_ = pipe("This is a test" ) self.assertEqual( a__ , [ { "generated_text": ( "This is a test test test test test test test test test test test test test test test test" " test" ) } ] , ) @require_torch @require_torch_gpu def lowerCAmelCase__ ( self ) -> Union[str, Any]: '''simple docstring''' import torch snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device=0 , torch_dtype=torch.floataa ) pipe("This is a test" ) @require_torch @require_accelerate @require_torch_gpu def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' import torch snake_case_ = pipeline(model="hf-internal-testing/tiny-random-bloom" , device_map="auto" , torch_dtype=torch.floataa ) pipe("This is a test" , do_sample=a__ , top_p=0.5 ) def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' snake_case_ = "Hello world" snake_case_ = pipeline("text-generation" , model="hf-internal-testing/tiny-random-gpt2" ) if text_generator.model.framework == "tf": snake_case_ = logging.get_logger("transformers.generation.tf_utils" ) else: snake_case_ = logging.get_logger("transformers.generation.utils" ) snake_case_ = "Both `max_new_tokens`" # The beggining of the message to be checked in this test # Both are set by the user -> log warning with CaptureLogger(a__ ) as cl: snake_case_ = text_generator(a__ , max_length=10 , 
max_new_tokens=1 ) self.assertIn(a__ , cl.out ) # The user only sets one -> no warning with CaptureLogger(a__ ) as cl: snake_case_ = text_generator(a__ , max_new_tokens=1 ) self.assertNotIn(a__ , cl.out ) with CaptureLogger(a__ ) as cl: snake_case_ = text_generator(a__ , max_length=10 ) self.assertNotIn(a__ , cl.out )
85
0
'''simple docstring'''
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
80
'''simple docstring'''
import unittest

from transformers import load_tool

from .test_tools_common import ToolTesterMixin


class TextClassificationToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-classification")
        self.tool.setup()
        self.remote_tool = load_tool("text-classification", remote=True)

    def test_exact_match_arg(self):
        result = self.tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_arg_remote(self):
        result = self.remote_tool("That's quite cool", ["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg(self):
        result = self.tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")

    def test_exact_match_kwarg_remote(self):
        result = self.remote_tool(text="That's quite cool", labels=["positive", "negative"])
        self.assertEqual(result, "positive")
85
0
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class LiltConfig(PretrainedConfig):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
345
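A short, hedged instantiation sketch for the LiLT config above (assumes the public transformers classes LiltConfig and LiltModel):

from transformers import LiltConfig, LiltModel

# Layout-aware fields are the LiLT-specific ones; the text fields mirror RoBERTa.
config = LiltConfig(channel_shrink_ratio=4, max_2d_position_embeddings=1024)
model = LiltModel(config)
print(config.model_type)  # lilt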
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging _SCREAMING_SNAKE_CASE : Tuple = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Dict = { "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json", "BridgeTower/bridgetower-base-itm-mlm": ( "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json" ), } class _snake_case ( lowercase_ ): lowerCAmelCase_ : Dict = "bridgetower_vision_model" def __init__( self , a__=768 , a__=12 , a__=3 , a__=16 , a__=288 , a__=1 , a__=1e-05 , a__=False , a__=True , a__=False , **a__ , ) -> int: '''simple docstring''' super().__init__(**a__ ) snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_channels snake_case_ = patch_size snake_case_ = image_size snake_case_ = initializer_factor snake_case_ = layer_norm_eps snake_case_ = stop_gradient snake_case_ = share_layernorm snake_case_ = remove_last_layer @classmethod def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig": '''simple docstring''' snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ ) if config_dict.get("model_type" ) == "bridgetower": snake_case_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(a__ , **a__ ) class _snake_case ( lowercase_ ): lowerCAmelCase_ : Any = "bridgetower_text_model" def __init__( self , a__=50_265 , a__=768 , a__=12 , a__=12 , a__=1 , a__=3_072 , a__="gelu" , a__=0.1 , a__=0.1 , a__=514 , a__=1 , a__=1e-05 , a__=1 , a__=0 , a__=2 , a__="absolute" , a__=True , **a__ , ) -> Optional[int]: '''simple docstring''' super().__init__(**a__ ) snake_case_ = vocab_size snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = hidden_act snake_case_ = initializer_factor snake_case_ = intermediate_size snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = max_position_embeddings snake_case_ = type_vocab_size snake_case_ = layer_norm_eps snake_case_ = position_embedding_type snake_case_ = use_cache snake_case_ = pad_token_id snake_case_ = bos_token_id snake_case_ = eos_token_id @classmethod def lowerCAmelCase__ ( cls , a__ , **a__ ) -> "PretrainedConfig": '''simple docstring''' snake_case_ , snake_case_ = cls.get_config_dict(a__ , **a__ ) if config_dict.get("model_type" ) == "bridgetower": snake_case_ = config_dict["text_config"] if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(a__ , **a__ ) class _snake_case ( lowercase_ ): lowerCAmelCase_ : Union[str, Any] = "bridgetower" def __init__( self , a__=True , a__="gelu" , a__=768 , a__=1 , a__=1e-05 , a__=False , a__="add" , a__=12 , a__=6 , a__=False , a__=False , a__=None , a__=None , **a__ , ) -> int: '''simple docstring''' snake_case_ = kwargs.pop("text_config_dict" , a__ ) snake_case_ = kwargs.pop("vision_config_dict" , a__ ) super().__init__(**a__ ) snake_case_ = share_cross_modal_transformer_layers snake_case_ = hidden_act snake_case_ = hidden_size snake_case_ = initializer_factor snake_case_ = layer_norm_eps snake_case_ = share_link_tower_layers snake_case_ = link_tower_type snake_case_ = num_attention_heads snake_case_ = num_hidden_layers snake_case_ = tie_word_embeddings snake_case_ = init_layernorm_from_vision_encoder if text_config is None: snake_case_ = {} logger.info("`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values." ) if vision_config is None: snake_case_ = {} logger.info("`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values." ) snake_case_ = BridgeTowerTextConfig(**a__ ) snake_case_ = BridgeTowerVisionConfig(**a__ ) @classmethod def lowerCAmelCase__ ( cls , a__ , a__ , **a__ ) -> List[Any]: '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a__ ) def lowerCAmelCase__ ( self ) -> List[Any]: '''simple docstring''' snake_case_ = copy.deepcopy(self.__dict__ ) snake_case_ = self.text_config.to_dict() snake_case_ = self.vision_config.to_dict() snake_case_ = self.__class__.model_type return output
85
0
'''simple docstring'''


class EditDistance:
    def __init__(self):
        self.word1 = ""
        self.word2 = ""
        self.dp = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)

            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]

        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]

        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
58
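A minimal usage sketch for the edit-distance solver above (names follow the class as defined there):

solver = EditDistance()
# The classic example: "intention" -> "execution" takes 5 edits,
# and both DP strategies must agree.
assert solver.min_dist_top_down("intention", "execution") == 5
assert solver.min_dist_bottom_up("intention", "execution") == 5
assert solver.min_dist_bottom_up("", "abc") == 3  # three insertions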
'''simple docstring'''
from __future__ import annotations


def all_unique(nums: list[int]) -> bool:
    # Every element occurs exactly once iff the set has the same length.
    return len(set(nums)) == len(nums)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
85
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a : Tuple = logging.get_logger(__name__) a : Dict = { "BridgeTower/bridgetower-base": "https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json", "BridgeTower/bridgetower-base-itm-mlm": ( "https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json" ), } class _a ( lowercase_ ): A = "bridgetower_vision_model" def __init__(self, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=3, SCREAMING_SNAKE_CASE_=16, SCREAMING_SNAKE_CASE_=288, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=1E-05, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_=False, **SCREAMING_SNAKE_CASE_, ) -> int: super().__init__(**a__ ) UpperCAmelCase_: List[str] = hidden_size UpperCAmelCase_: List[Any] = num_hidden_layers UpperCAmelCase_: Dict = num_channels UpperCAmelCase_: Optional[int] = patch_size UpperCAmelCase_: Optional[int] = image_size UpperCAmelCase_: Any = initializer_factor UpperCAmelCase_: Optional[Any] = layer_norm_eps UpperCAmelCase_: int = stop_gradient UpperCAmelCase_: List[Any] = share_layernorm UpperCAmelCase_: Tuple = remove_last_layer @classmethod def __snake_case (cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig": UpperCAmelCase_ , UpperCAmelCase_: Optional[int] = cls.get_config_dict(a__, **a__ ) if config_dict.get("""model_type""" ) == "bridgetower": UpperCAmelCase_: Union[str, Any] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' 
) return cls.from_dict(a__, **a__ ) class _a ( lowercase_ ): A = "bridgetower_text_model" def __init__(self, SCREAMING_SNAKE_CASE_=50265, SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=3072, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=0.1, SCREAMING_SNAKE_CASE_=514, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=1E-05, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=0, SCREAMING_SNAKE_CASE_=2, SCREAMING_SNAKE_CASE_="absolute", SCREAMING_SNAKE_CASE_=True, **SCREAMING_SNAKE_CASE_, ) -> Optional[int]: super().__init__(**a__ ) UpperCAmelCase_: List[Any] = vocab_size UpperCAmelCase_: Any = hidden_size UpperCAmelCase_: Optional[int] = num_hidden_layers UpperCAmelCase_: int = num_attention_heads UpperCAmelCase_: List[str] = hidden_act UpperCAmelCase_: str = initializer_factor UpperCAmelCase_: Optional[int] = intermediate_size UpperCAmelCase_: Union[str, Any] = hidden_dropout_prob UpperCAmelCase_: Any = attention_probs_dropout_prob UpperCAmelCase_: Any = max_position_embeddings UpperCAmelCase_: Any = type_vocab_size UpperCAmelCase_: int = layer_norm_eps UpperCAmelCase_: int = position_embedding_type UpperCAmelCase_: Optional[int] = use_cache UpperCAmelCase_: List[str] = pad_token_id UpperCAmelCase_: Union[str, Any] = bos_token_id UpperCAmelCase_: int = eos_token_id @classmethod def __snake_case (cls, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> "PretrainedConfig": UpperCAmelCase_ , UpperCAmelCase_: Optional[Any] = cls.get_config_dict(a__, **a__ ) if config_dict.get("""model_type""" ) == "bridgetower": UpperCAmelCase_: Tuple = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls, """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(a__, **a__ ) class _a ( lowercase_ ): A = "bridgetower" def __init__(self, SCREAMING_SNAKE_CASE_=True, SCREAMING_SNAKE_CASE_="gelu", SCREAMING_SNAKE_CASE_=768, SCREAMING_SNAKE_CASE_=1, SCREAMING_SNAKE_CASE_=1E-05, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_="add", SCREAMING_SNAKE_CASE_=12, SCREAMING_SNAKE_CASE_=6, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=False, SCREAMING_SNAKE_CASE_=None, SCREAMING_SNAKE_CASE_=None, **SCREAMING_SNAKE_CASE_, ) -> int: UpperCAmelCase_: Optional[int] = kwargs.pop("""text_config_dict""", a__ ) UpperCAmelCase_: Tuple = kwargs.pop("""vision_config_dict""", a__ ) super().__init__(**a__ ) UpperCAmelCase_: Dict = share_cross_modal_transformer_layers UpperCAmelCase_: List[str] = hidden_act UpperCAmelCase_: Union[str, Any] = hidden_size UpperCAmelCase_: List[str] = initializer_factor UpperCAmelCase_: List[Any] = layer_norm_eps UpperCAmelCase_: str = share_link_tower_layers UpperCAmelCase_: Optional[Any] = link_tower_type UpperCAmelCase_: List[Any] = num_attention_heads UpperCAmelCase_: Optional[int] = num_hidden_layers UpperCAmelCase_: str = tie_word_embeddings UpperCAmelCase_: int = init_layernorm_from_vision_encoder if text_config is None: UpperCAmelCase_: List[str] = {} logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" ) if vision_config is None: UpperCAmelCase_: int = {} logger.info("""`vision_config` is `None`. 
Initializing the `BridgeTowerVisionConfig` with default values.""" ) UpperCAmelCase_: List[str] = BridgeTowerTextConfig(**a__ ) UpperCAmelCase_: Any = BridgeTowerVisionConfig(**a__ ) @classmethod def __snake_case (cls, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, **SCREAMING_SNAKE_CASE_ ) -> List[Any]: return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **a__ ) def __snake_case (self ) -> List[Any]: UpperCAmelCase_: int = copy.deepcopy(self.__dict__ ) UpperCAmelCase_: Union[str, Any] = self.text_config.to_dict() UpperCAmelCase_: Union[str, Any] = self.vision_config.to_dict() UpperCAmelCase_: Optional[Any] = self.__class__.model_type return output
147
'''simple docstring''' import builtins import sys from ...utils.imports import _is_package_available from . import cursor, input from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor from .keymap import KEYMAP _SCREAMING_SNAKE_CASE : Any = False try: _SCREAMING_SNAKE_CASE : Optional[Any] = _is_package_available("google.colab") except ModuleNotFoundError: pass @input.register class _snake_case : def __init__( self , a__ = None , a__ = [] ) -> List[str]: '''simple docstring''' snake_case_ = 0 snake_case_ = choices snake_case_ = prompt if sys.platform == "win32": snake_case_ = "*" else: snake_case_ = "➔ " def lowerCAmelCase__ ( self , a__ , a__ = "" ) -> int: '''simple docstring''' if sys.platform != "win32": writeColor(self.choices[index] , 32 , a__ ) else: forceWrite(self.choices[index] , a__ ) def lowerCAmelCase__ ( self , a__ ) -> Tuple: '''simple docstring''' if index == self.position: forceWrite(F' {self.arrow_char} ' ) self.write_choice(a__ ) else: forceWrite(F' {self.choices[index]}' ) reset_cursor() def lowerCAmelCase__ ( self , a__ , a__ = 1 ) -> List[str]: '''simple docstring''' snake_case_ = self.position if direction == Direction.DOWN: if self.position + 1 >= len(self.choices ): return self.position += num_spaces else: if self.position - 1 < 0: return self.position -= num_spaces clear_line() self.print_choice(a__ ) move_cursor(a__ , direction.name ) self.print_choice(self.position ) @input.mark(KEYMAP["up"] ) def lowerCAmelCase__ ( self ) -> Dict: '''simple docstring''' self.move_direction(Direction.UP ) @input.mark(KEYMAP["down"] ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' self.move_direction(Direction.DOWN ) @input.mark(KEYMAP["newline"] ) def lowerCAmelCase__ ( self ) -> str: '''simple docstring''' move_cursor(len(self.choices ) - self.position , "DOWN" ) return self.position @input.mark(KEYMAP["interrupt"] ) def lowerCAmelCase__ ( self ) -> Tuple: '''simple docstring''' move_cursor(len(self.choices ) - self.position , "DOWN" ) raise KeyboardInterrupt @input.mark_multiple(*[KEYMAP[str(a__ )] for number in range(10 )] ) def lowerCAmelCase__ ( self ) -> int: '''simple docstring''' snake_case_ = int(chr(self.current_selection ) ) snake_case_ = index - self.position if index == self.position: return if index < len(self.choices ): if self.position > index: self.move_direction(Direction.UP , -movement ) elif self.position < index: self.move_direction(Direction.DOWN , a__ ) else: return else: return def lowerCAmelCase__ ( self , a__ = 0 ) -> List[str]: '''simple docstring''' if self.prompt: linebreak() forceWrite(self.prompt , "\n" ) if in_colab: forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" ) else: forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" ) snake_case_ = default_choice for i in range(len(self.choices ) ): self.print_choice(a__ ) forceWrite("\n" ) move_cursor(len(self.choices ) - self.position , "UP" ) with cursor.hide(): while True: if in_colab: try: snake_case_ = int(builtins.input() ) except ValueError: snake_case_ = default_choice else: snake_case_ = self.handle_input() if choice is not None: reset_cursor() for _ in range(len(self.choices ) + 1 ): move_cursor(1 , "UP" ) clear_line() self.write_choice(a__ , "\n" ) return choice
85
0
def binomial_coefficient(n: int, r: int) -> int:
    # Rolling single-row variant of Pascal's triangle, O(r) space.
    c = [0 for i in range(r + 1)]
    # nc0 = 1
    c[0] = 1
    for i in range(1, n + 1):
        # to compute current row from previous row.
        j = min(i, r)
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]


print(binomial_coefficient(n=10, r=5))
140
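Spot checks for the binomial-coefficient routine above (assumes binomial_coefficient as defined there):

# The rolling-row update reproduces Pascal's triangle values.
assert binomial_coefficient(10, 5) == 252
assert binomial_coefficient(6, 2) == 15   # 6 * 5 / 2
assert binomial_coefficient(5, 0) == 1    # nC0 is always 1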
'''simple docstring'''
import platform
from argparse import ArgumentParser

import huggingface_hub

from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand


def info_command_factory(_):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
85
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available, is_vision_available, ) UpperCAmelCase_ : Tuple = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : str = ["BeitFeatureExtractor"] UpperCAmelCase_ : Dict = ["BeitImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : List[Any] = [ "BEIT_PRETRAINED_MODEL_ARCHIVE_LIST", "BeitForImageClassification", "BeitForMaskedImageModeling", "BeitForSemanticSegmentation", "BeitModel", "BeitPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase_ : Any = [ "FlaxBeitForImageClassification", "FlaxBeitForMaskedImageModeling", "FlaxBeitModel", "FlaxBeitPreTrainedModel", ] if TYPE_CHECKING: from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_beit import BeitFeatureExtractor from .image_processing_beit import BeitImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_beit import ( BEIT_PRETRAINED_MODEL_ARCHIVE_LIST, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, BeitPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_beit import ( FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel, FlaxBeitPreTrainedModel, ) else: import sys UpperCAmelCase_ : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
91
'''simple docstring'''
import os


SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}


def parse_roman_numerals(numerals: str) -> int:
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        # Subtractive notation: a smaller symbol before a larger one is negative.
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value


def generate_roman_numerals(num: int) -> str:
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals


def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shortened = generate_roman_numerals(num)
        savings += len(original) - len(shortened)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
85
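A quick round-trip check for the Roman-numeral helpers above (names as defined there):

# "XXXXVIIII" (9 chars) and the minimal "XLIX" (4 chars) both encode 49,
# so re-encoding saves 5 characters, the quantity solution() accumulates.
assert parse_roman_numerals("XLIX") == 49
assert parse_roman_numerals("XXXXVIIII") == 49
assert generate_roman_numerals(49) == "XLIX"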
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_longformer""": [ """LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LongformerConfig""", """LongformerOnnxConfig""", ], """tokenization_longformer""": ["""LongformerTokenizer"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""LongformerTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """LongformerForMaskedLM""", """LongformerForMultipleChoice""", """LongformerForQuestionAnswering""", """LongformerForSequenceClassification""", """LongformerForTokenClassification""", """LongformerModel""", """LongformerPreTrainedModel""", """LongformerSelfAttention""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFLongformerForMaskedLM""", """TFLongformerForMultipleChoice""", """TFLongformerForQuestionAnswering""", """TFLongformerForSequenceClassification""", """TFLongformerForTokenClassification""", """TFLongformerModel""", """TFLongformerPreTrainedModel""", """TFLongformerSelfAttention""", ] if TYPE_CHECKING: from .configuration_longformer import ( LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerOnnxConfig, ) from .tokenization_longformer import LongformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_longformer_fast import LongformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, LongformerForTokenClassification, LongformerModel, LongformerPreTrainedModel, LongformerSelfAttention, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerPreTrainedModel, TFLongformerSelfAttention, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
86
"""simple docstring""" import numpy as np def __lowerCAmelCase (_UpperCamelCase ): return 1 / (1 + np.exp(-vector )) def __lowerCAmelCase (_UpperCamelCase ): return vector * sigmoid(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
86
1
"""simple docstring""" lowerCamelCase__ = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] lowerCamelCase__ = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] lowerCamelCase__ = { 0: """Sunday""", 1: """Monday""", 2: """Tuesday""", 3: """Wednesday""", 4: """Thursday""", 5: """Friday""", 6: """Saturday""", } def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): assert len(str(_UpperCamelCase ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: __lowerCAmelCase : Optional[Any] = year // 100 __lowerCAmelCase : Any = (5 * (century % 4) + 2) % 7 __lowerCAmelCase : Tuple = year % 100 __lowerCAmelCase : Optional[int] = centurian % 12 __lowerCAmelCase : Dict = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 __lowerCAmelCase : int = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0) else DOOMSDAY_LEAP[month - 1] ) __lowerCAmelCase : Tuple = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
86
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Union[str, Any] = batch_size __lowerCAmelCase : Dict = seq_length __lowerCAmelCase : Dict = is_training __lowerCAmelCase : List[str] = use_input_mask __lowerCAmelCase : int = use_token_type_ids __lowerCAmelCase : Optional[int] = use_labels __lowerCAmelCase : List[Any] = vocab_size __lowerCAmelCase : Dict = hidden_size __lowerCAmelCase : Tuple = embedding_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : Tuple = num_attention_heads __lowerCAmelCase : Union[str, Any] = intermediate_size __lowerCAmelCase : Optional[Any] = hidden_act __lowerCAmelCase : Optional[int] = hidden_dropout_prob __lowerCAmelCase : Dict = attention_probs_dropout_prob __lowerCAmelCase : Any = max_position_embeddings __lowerCAmelCase : Any = type_vocab_size __lowerCAmelCase : Union[str, Any] = type_sequence_label_size __lowerCAmelCase : List[str] = initializer_range __lowerCAmelCase : str = num_labels __lowerCAmelCase : int = num_choices __lowerCAmelCase : Union[str, Any] = scope def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Optional[int] = None if self.use_input_mask: __lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : str = None if self.use_token_type_ids: __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Union[str, Any] = None if self.use_labels: __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): return 
MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Dict = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[Any] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = 
MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[str] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = self.num_labels __lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = self.num_labels __lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = self.num_choices __lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[str] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) : Optional[Any] = config_and_inputs __lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : str = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() 
else () ) A_ : List[str] = ( { 'feature-extraction': MobileBertModel, 'fill-mask': MobileBertForMaskedLM, 'question-answering': MobileBertForQuestionAnswering, 'text-classification': MobileBertForSequenceClassification, 'token-classification': MobileBertForTokenClassification, 'zero-shot': MobileBertForSequenceClassification, } if is_torch_available() else {} ) A_ : Dict = True def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) return inputs_dict def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = MobileBertModelTester(self ) __lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE ) def __lowerCAmelCase (_UpperCamelCase ): return torch.tensor( _UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , ) lowerCamelCase__ = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0] __lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) 
__lowerCAmelCase : Dict = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=_SCREAMING_SNAKE_CASE , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE __lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) __lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
86
1
"""simple docstring""" from typing import Optional, Union import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACTaFN from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention from ...modeling_utils import PreTrainedModel from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_mobilenet_va import MobileNetVaConfig lowerCamelCase__ = logging.get_logger(__name__) # General docstring lowerCamelCase__ = """MobileNetV1Config""" # Base docstring lowerCamelCase__ = """google/mobilenet_v1_1.0_224""" lowerCamelCase__ = [1, 1_024, 7, 7] # Image classification docstring lowerCamelCase__ = """google/mobilenet_v1_1.0_224""" lowerCamelCase__ = """tabby, tabby cat""" lowerCamelCase__ = [ """google/mobilenet_v1_1.0_224""", """google/mobilenet_v1_0.75_192""", # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1 ] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ): __lowerCAmelCase : List[str] = {} if isinstance(_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Tuple = model.mobilenet_va else: __lowerCAmelCase : List[Any] = model __lowerCAmelCase : Any = 'MobilenetV1/Conv2d_0/' __lowerCAmelCase : List[str] = backbone.conv_stem.convolution.weight __lowerCAmelCase : Optional[int] = backbone.conv_stem.normalization.bias __lowerCAmelCase : Dict = backbone.conv_stem.normalization.weight __lowerCAmelCase : Optional[int] = backbone.conv_stem.normalization.running_mean __lowerCAmelCase : str = backbone.conv_stem.normalization.running_var for i in range(13 ): __lowerCAmelCase : Union[str, Any] = i + 1 __lowerCAmelCase : List[str] = i * 2 __lowerCAmelCase : List[Any] = backbone.layer[pt_index] __lowerCAmelCase : Union[str, Any] = F"MobilenetV1/Conv2d_{tf_index}_depthwise/" __lowerCAmelCase : Any = pointer.convolution.weight __lowerCAmelCase : Tuple = pointer.normalization.bias __lowerCAmelCase : Tuple = pointer.normalization.weight __lowerCAmelCase : List[str] = pointer.normalization.running_mean __lowerCAmelCase : Dict = pointer.normalization.running_var __lowerCAmelCase : int = backbone.layer[pt_index + 1] __lowerCAmelCase : str = F"MobilenetV1/Conv2d_{tf_index}_pointwise/" __lowerCAmelCase : Union[str, Any] = pointer.convolution.weight __lowerCAmelCase : int = pointer.normalization.bias __lowerCAmelCase : Tuple = pointer.normalization.weight __lowerCAmelCase : List[Any] = pointer.normalization.running_mean __lowerCAmelCase : Tuple = pointer.normalization.running_var if isinstance(_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[str] = 'MobilenetV1/Logits/Conv2d_1c_1x1/' __lowerCAmelCase : Optional[Any] = model.classifier.weight __lowerCAmelCase : List[str] = model.classifier.bias return tf_to_pt_map def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): try: import numpy as np import tensorflow as tf except ImportError: logger.error( 'Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see ' 'https://www.tensorflow.org/install/ for installation instructions.' 
) raise # Load weights from TF model __lowerCAmelCase : List[str] = tf.train.list_variables(_UpperCamelCase ) __lowerCAmelCase : Optional[Any] = {} for name, shape in init_vars: logger.info(F"Loading TF weight {name} with shape {shape}" ) __lowerCAmelCase : Any = tf.train.load_variable(_UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase : Dict = array # Build TF to PyTorch weights loading map __lowerCAmelCase : Optional[Any] = _build_tf_to_pytorch_map(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) for name, pointer in tf_to_pt_map.items(): logger.info(F"Importing {name}" ) if name not in tf_weights: logger.info(F"{name} not in tf pre-trained weights, skipping" ) continue __lowerCAmelCase : Any = tf_weights[name] if "depthwise_weights" in name: logger.info('Transposing depthwise' ) __lowerCAmelCase : Dict = np.transpose(_UpperCamelCase , (2, 3, 0, 1) ) elif "weights" in name: logger.info('Transposing' ) if len(pointer.shape ) == 2: # copying into linear layer __lowerCAmelCase : Optional[Any] = array.squeeze().transpose() else: __lowerCAmelCase : Any = np.transpose(_UpperCamelCase , (3, 2, 0, 1) ) if pointer.shape != array.shape: raise ValueError(F"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" ) logger.info(F"Initialize PyTorch weight {name} {array.shape}" ) __lowerCAmelCase : str = torch.from_numpy(_UpperCamelCase ) tf_weights.pop(_UpperCamelCase , _UpperCamelCase ) tf_weights.pop(name + '/RMSProp' , _UpperCamelCase ) tf_weights.pop(name + '/RMSProp_1' , _UpperCamelCase ) tf_weights.pop(name + '/ExponentialMovingAverage' , _UpperCamelCase ) logger.info(F"Weights not copied to PyTorch model: {', '.join(tf_weights.keys() )}" ) return model def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase , __lowerCAmelCase : List[str] = features.shape[-2:] __lowerCAmelCase , __lowerCAmelCase : Dict = conv_layer.stride __lowerCAmelCase , __lowerCAmelCase : int = conv_layer.kernel_size if in_height % stride_height == 0: __lowerCAmelCase : Dict = max(kernel_height - stride_height , 0 ) else: __lowerCAmelCase : Optional[int] = max(kernel_height - (in_height % stride_height) , 0 ) if in_width % stride_width == 0: __lowerCAmelCase : Tuple = max(kernel_width - stride_width , 0 ) else: __lowerCAmelCase : Tuple = max(kernel_width - (in_width % stride_width) , 0 ) __lowerCAmelCase : Optional[Any] = pad_along_width // 2 __lowerCAmelCase : int = pad_along_width - pad_left __lowerCAmelCase : str = pad_along_height // 2 __lowerCAmelCase : Tuple = pad_along_height - pad_top __lowerCAmelCase : Optional[int] = (pad_left, pad_right, pad_top, pad_bottom) return nn.functional.pad(_UpperCamelCase , _UpperCamelCase , 'constant' , 0.0 ) class A__ ( nn.Module): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , ): super().__init__() __lowerCAmelCase : Any = config if in_channels % groups != 0: raise ValueError(f"Input channels ({in_channels}) are not divisible by {groups} groups." ) if out_channels % groups != 0: raise ValueError(f"Output channels ({out_channels}) are not divisible by {groups} groups." 
) __lowerCAmelCase : Dict = 0 if config.tf_padding else int((kernel_size - 1) / 2 ) __lowerCAmelCase : Optional[int] = nn.Convad( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , groups=_SCREAMING_SNAKE_CASE , bias=_SCREAMING_SNAKE_CASE , padding_mode='zeros' , ) if use_normalization: __lowerCAmelCase : Dict = nn.BatchNormad( num_features=_SCREAMING_SNAKE_CASE , eps=config.layer_norm_eps , momentum=0.9997 , affine=_SCREAMING_SNAKE_CASE , track_running_stats=_SCREAMING_SNAKE_CASE , ) else: __lowerCAmelCase : int = None if use_activation: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = ACTaFN[use_activation] elif isinstance(config.hidden_act , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = ACTaFN[config.hidden_act] else: __lowerCAmelCase : List[Any] = config.hidden_act else: __lowerCAmelCase : Any = None def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): if self.config.tf_padding: __lowerCAmelCase : Dict = apply_tf_padding(_SCREAMING_SNAKE_CASE , self.convolution ) __lowerCAmelCase : str = self.convolution(_SCREAMING_SNAKE_CASE ) if self.normalization is not None: __lowerCAmelCase : Union[str, Any] = self.normalization(_SCREAMING_SNAKE_CASE ) if self.activation is not None: __lowerCAmelCase : Optional[Any] = self.activation(_SCREAMING_SNAKE_CASE ) return features class A__ ( _lowerCamelCase): A_ : int = MobileNetVaConfig A_ : Optional[int] = load_tf_weights_in_mobilenet_va A_ : int = 'mobilenet_v1' A_ : Dict = 'pixel_values' A_ : int = False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , (nn.Linear, nn.Convad) ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() elif isinstance(_SCREAMING_SNAKE_CASE , nn.BatchNormad ): module.bias.data.zero_() module.weight.data.fill_(1.0 ) lowerCamelCase__ = R""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ lowerCamelCase__ = R""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See [`MobileNetV1ImageProcessor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( 'The bare MobileNetV1 model outputting raw hidden-states without any specific head on top.' 
, _lowerCamelCase , ) class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True ): super().__init__(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = config __lowerCAmelCase : Optional[Any] = 32 __lowerCAmelCase : Optional[int] = max(int(depth * config.depth_multiplier ) , config.min_depth ) __lowerCAmelCase : Optional[Any] = MobileNetVaConvLayer( _SCREAMING_SNAKE_CASE , in_channels=config.num_channels , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=3 , stride=2 , ) __lowerCAmelCase : str = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1] __lowerCAmelCase : List[str] = nn.ModuleList() for i in range(13 ): __lowerCAmelCase : List[str] = out_channels if strides[i] == 2 or i == 0: depth *= 2 __lowerCAmelCase : List[str] = max(int(depth * config.depth_multiplier ) , config.min_depth ) self.layer.append( MobileNetVaConvLayer( _SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=3 , stride=strides[i] , groups=_SCREAMING_SNAKE_CASE , ) ) self.layer.append( MobileNetVaConvLayer( _SCREAMING_SNAKE_CASE , in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , kernel_size=1 , ) ) __lowerCAmelCase : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): raise NotImplementedError @add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ): __lowerCAmelCase : int = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __lowerCAmelCase : Any = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError('You have to specify pixel_values' ) __lowerCAmelCase : List[str] = self.conv_stem(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = () if output_hidden_states else None for i, layer_module in enumerate(self.layer ): __lowerCAmelCase : Optional[int] = layer_module(_SCREAMING_SNAKE_CASE ) if output_hidden_states: __lowerCAmelCase : int = all_hidden_states + (hidden_states,) __lowerCAmelCase : int = hidden_states if self.pooler is not None: __lowerCAmelCase : Optional[int] = torch.flatten(self.pooler(_SCREAMING_SNAKE_CASE ) , start_dim=1 ) else: __lowerCAmelCase : Optional[Any] = None if not return_dict: return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None ) return BaseModelOutputWithPoolingAndNoAttention( last_hidden_state=_SCREAMING_SNAKE_CASE , pooler_output=_SCREAMING_SNAKE_CASE , hidden_states=_SCREAMING_SNAKE_CASE , ) @add_start_docstrings( '\n MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. 
for\n ImageNet.\n ' , _lowerCamelCase , ) class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE ): super().__init__(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = config.num_labels __lowerCAmelCase : Union[str, Any] = MobileNetVaModel(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = self.mobilenet_va.layer[-1].convolution.out_channels # Classifier head __lowerCAmelCase : Optional[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = nn.Linear(_SCREAMING_SNAKE_CASE , config.num_labels ) if config.num_labels > 0 else nn.Identity() # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(_SCREAMING_SNAKE_CASE ) @add_code_sample_docstrings( checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_SCREAMING_SNAKE_CASE , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ): __lowerCAmelCase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict __lowerCAmelCase : int = self.mobilenet_va(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = outputs.pooler_output if return_dict else outputs[1] __lowerCAmelCase : Dict = self.classifier(self.dropout(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Optional[int] = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: __lowerCAmelCase : Optional[int] = 'regression' elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): __lowerCAmelCase : Optional[int] = 'single_label_classification' else: __lowerCAmelCase : int = 'multi_label_classification' if self.config.problem_type == "regression": __lowerCAmelCase : Tuple = MSELoss() if self.num_labels == 1: __lowerCAmelCase : Tuple = loss_fct(logits.squeeze() , labels.squeeze() ) else: __lowerCAmelCase : Dict = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) elif self.config.problem_type == "single_label_classification": __lowerCAmelCase : Tuple = CrossEntropyLoss() __lowerCAmelCase : Dict = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) ) elif self.config.problem_type == "multi_label_classification": __lowerCAmelCase : Tuple = BCEWithLogitsLoss() __lowerCAmelCase : List[Any] = loss_fct(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if not return_dict: __lowerCAmelCase : str = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return ImageClassifierOutputWithNoAttention( loss=_SCREAMING_SNAKE_CASE , logits=_SCREAMING_SNAKE_CASE , hidden_states=outputs.hidden_states , )
86
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class A__ ( _lowerCamelCase): A_ : Any = ['image_processor', 'tokenizer'] A_ : Optional[Any] = 'AutoImageProcessor' A_ : str = 'AutoTokenizer' def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Any = kwargs.pop('feature_extractor' ) __lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = self.image_processor __lowerCAmelCase : Tuple = False def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: __lowerCAmelCase : Dict = args[0] __lowerCAmelCase : Union[str, Any] = args[1:] if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: __lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is not None: __lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is None: return inputs elif images is None: return encodings else: __lowerCAmelCase : Union[str, Any] = encodings['input_ids'] return inputs def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @contextmanager def __lowerCamelCase ( self ): warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your images inputs, or in a separate call.' 
) __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = self.tokenizer yield __lowerCAmelCase : Optional[int] = self.image_processor __lowerCAmelCase : Optional[Any] = False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ): if added_vocab is None: __lowerCAmelCase : str = self.tokenizer.get_added_vocab() __lowerCAmelCase : List[Any] = {} while tokens: __lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) if start_token is None: break __lowerCAmelCase : Union[str, Any] = start_token.group(1 ) __lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) __lowerCAmelCase : str = start_token.group() if end_token is None: __lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' ) else: __lowerCAmelCase : Optional[Any] = end_token.group() __lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) if content is not None: __lowerCAmelCase : List[str] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node __lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE ) if value: if len(_SCREAMING_SNAKE_CASE ) == 1: __lowerCAmelCase : Tuple = value[0] __lowerCAmelCase : Tuple = value else: # leaf nodes __lowerCAmelCase : Any = [] for leaf in content.split(R'<sep/>' ): __lowerCAmelCase : List[Any] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": __lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens output[key].append(_SCREAMING_SNAKE_CASE ) if len(output[key] ) == 1: __lowerCAmelCase : str = output[key][0] __lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def __lowerCamelCase ( self ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def __lowerCamelCase ( self ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , ) return self.image_processor
86
1
"""simple docstring""" import gc import unittest from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline from diffusers.utils import is_flax_available, load_image, slow from diffusers.utils.testing_utils import require_flax if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard @slow @require_flax class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : List[Any] = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-canny' , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) __lowerCAmelCase : str = controlnet_params __lowerCAmelCase : Dict = 'bird' __lowerCAmelCase : str = jax.device_count() __lowerCAmelCase : Dict = pipe.prepare_text_inputs([prompts] * num_samples ) __lowerCAmelCase : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png' ) __lowerCAmelCase : Dict = pipe.prepare_image_inputs([canny_image] * num_samples ) __lowerCAmelCase : Dict = jax.random.PRNGKey(0 ) __lowerCAmelCase : List[str] = jax.random.split(_SCREAMING_SNAKE_CASE , jax.device_count() ) __lowerCAmelCase : Tuple = replicate(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = shard(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = shard(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = pipe( prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) __lowerCAmelCase : Tuple = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowerCAmelCase : Any = images[0, 2_53:2_56, 2_53:2_56, -1] __lowerCAmelCase : int = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowerCAmelCase : Dict = jnp.array( [0.16_7969, 0.11_6699, 0.08_1543, 0.15_4297, 0.13_2812, 0.10_8887, 0.16_9922, 0.16_9922, 0.20_5078] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : str = FlaxControlNetModel.from_pretrained( 'lllyasviel/sd-controlnet-openpose' , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = FlaxStableDiffusionControlNetPipeline.from_pretrained( 'runwayml/stable-diffusion-v1-5' , controlnet=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE , dtype=jnp.bfloataa ) __lowerCAmelCase : List[Any] = controlnet_params __lowerCAmelCase : List[Any] = 'Chef in the kitchen' __lowerCAmelCase : Union[str, Any] = jax.device_count() __lowerCAmelCase : Optional[Any] = pipe.prepare_text_inputs([prompts] * num_samples ) __lowerCAmelCase : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png' ) __lowerCAmelCase : int = pipe.prepare_image_inputs([pose_image] * num_samples ) __lowerCAmelCase : List[Any] = jax.random.PRNGKey(0 ) __lowerCAmelCase : Optional[Any] = jax.random.split(_SCREAMING_SNAKE_CASE , 
jax.device_count() ) __lowerCAmelCase : Optional[Any] = replicate(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = shard(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = shard(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = pipe( prompt_ids=_SCREAMING_SNAKE_CASE , image=_SCREAMING_SNAKE_CASE , params=_SCREAMING_SNAKE_CASE , prng_seed=_SCREAMING_SNAKE_CASE , num_inference_steps=50 , jit=_SCREAMING_SNAKE_CASE , ).images assert images.shape == (jax.device_count(), 1, 7_68, 5_12, 3) __lowerCAmelCase : Dict = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] ) __lowerCAmelCase : Any = images[0, 2_53:2_56, 2_53:2_56, -1] __lowerCAmelCase : Optional[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) ) __lowerCAmelCase : Tuple = jnp.array( [[0.27_1484, 0.26_1719, 0.27_5391, 0.27_7344, 0.27_9297, 0.29_1016, 0.29_4922, 0.30_2734, 0.30_2734]] ) print(f"output_slice: {output_slice}" ) assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
86
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Tuple = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def __lowerCAmelCase (_UpperCamelCase = 100 ): __lowerCAmelCase : Optional[int] = 1 __lowerCAmelCase : Optional[Any] = 2 for i in range(2 , max_n + 1 ): __lowerCAmelCase : Any = pre_numerator __lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1 __lowerCAmelCase : int = cur_numerator __lowerCAmelCase : Dict = e_cont * pre_numerator + temp return sum_digits(_UpperCamelCase ) if __name__ == "__main__": print(f'{solution() = }')
86
1
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Generic, TypeVar lowerCamelCase__ = TypeVar("""T""") class A__ ( Generic[T]): def __init__( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = data __lowerCAmelCase : Node[T] | None = None def __str__( self ): return f"{self.data}" class A__ ( Generic[T]): def __init__( self ): __lowerCAmelCase : Node[T] | None = None def __iter__( self ): __lowerCAmelCase : Tuple = self.top while node: yield node.data __lowerCAmelCase : Optional[int] = node.next def __str__( self ): return "->".join([str(_SCREAMING_SNAKE_CASE ) for item in self] ) def __len__( self ): return len(tuple(iter(self ) ) ) def __lowerCamelCase ( self ): return self.top is None def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = Node(_SCREAMING_SNAKE_CASE ) if not self.is_empty(): __lowerCAmelCase : Union[str, Any] = self.top __lowerCAmelCase : Optional[int] = node def __lowerCamelCase ( self ): if self.is_empty(): raise IndexError('pop from empty stack' ) assert isinstance(self.top , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = self.top __lowerCAmelCase : int = self.top.next return pop_node.data def __lowerCamelCase ( self ): if self.is_empty(): raise IndexError('peek from empty stack' ) assert self.top is not None return self.top.data def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = None if __name__ == "__main__": from doctest import testmod testmod()
86
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""", """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""", } class A__ ( _lowerCamelCase): A_ : List[Any] = 'markuplm' def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): super().__init__( pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = vocab_size __lowerCAmelCase : Any = hidden_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : Tuple = num_attention_heads __lowerCAmelCase : Union[str, Any] = hidden_act __lowerCAmelCase : List[Any] = intermediate_size __lowerCAmelCase : List[str] = hidden_dropout_prob __lowerCAmelCase : List[str] = attention_probs_dropout_prob __lowerCAmelCase : Optional[int] = max_position_embeddings __lowerCAmelCase : int = type_vocab_size __lowerCAmelCase : Tuple = initializer_range __lowerCAmelCase : int = layer_norm_eps __lowerCAmelCase : List[str] = position_embedding_type __lowerCAmelCase : List[Any] = use_cache __lowerCAmelCase : Optional[Any] = classifier_dropout # additional properties __lowerCAmelCase : Optional[int] = max_depth __lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings __lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings __lowerCAmelCase : Any = tag_pad_id __lowerCAmelCase : Union[str, Any] = subs_pad_id __lowerCAmelCase : int = xpath_unit_hidden_size
86
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase__ = {"""configuration_mra""": ["""MRA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MraConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """MRA_PRETRAINED_MODEL_ARCHIVE_LIST""", """MraForMaskedLM""", """MraForMultipleChoice""", """MraForQuestionAnswering""", """MraForSequenceClassification""", """MraForTokenClassification""", """MraLayer""", """MraModel""", """MraPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
86
"""simple docstring""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""", """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""", """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } lowerCamelCase__ = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): for attribute in key.split('.' ): __lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase ) if weight_type is not None: __lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape else: __lowerCAmelCase : Dict = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "weight_g": __lowerCAmelCase : List[Any] = value elif weight_type == "weight_v": __lowerCAmelCase : Any = value elif weight_type == "bias": __lowerCAmelCase : List[str] = value else: __lowerCAmelCase : List[Any] = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Any = [] __lowerCAmelCase : Optional[int] = fairseq_model.state_dict() __lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , ) __lowerCAmelCase : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: __lowerCAmelCase : int = True if "*" in mapped_key: __lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2] __lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase ) if "weight_g" in name: __lowerCAmelCase : Union[str, Any] = 'weight_g' elif "weight_v" in name: __lowerCAmelCase : int = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __lowerCAmelCase : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase : List[str] = 'weight' else: __lowerCAmelCase : Optional[Any] = None set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1] __lowerCAmelCase : Any = name.split('.' ) __lowerCAmelCase : List[Any] = int(items[0] ) __lowerCAmelCase : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowerCAmelCase : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowerCAmelCase : int = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __lowerCAmelCase : Optional[Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __lowerCAmelCase : Any = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ): # load the pre-trained checkpoints __lowerCAmelCase : Any = torch.load(_UpperCamelCase ) __lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] ) __lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase ) else: __lowerCAmelCase : List[str] = WavLMConfig() __lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase ) recursively_load_weights(_UpperCamelCase , _UpperCamelCase ) hf_wavlm.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
86
1
"""simple docstring""" import os import pytest from attr import dataclass lowerCamelCase__ = """us-east-1""" # defaults region @dataclass class A__ : A_ : str A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' A_ : Optional[int] = { 'task_name': 'mnli', 'per_device_train_batch_size': 1_6, 'per_device_eval_batch_size': 1_6, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_0_0, 'save_steps': 5_5_0_0, } A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0} @property def __lowerCamelCase ( self ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowerCamelCase ( self ): return f"{self.framework}-transfromers-test" @property def __lowerCamelCase ( self ): return f"./tests/sagemaker/scripts/{self.framework}" @property def __lowerCamelCase ( self ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework )
86
"""simple docstring""" import os import pytest from attr import dataclass lowerCamelCase__ = """us-east-1""" # defaults region @dataclass class A__ : A_ : str A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' A_ : Optional[int] = { 'task_name': 'mnli', 'per_device_train_batch_size': 1_6, 'per_device_eval_batch_size': 1_6, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_0_0, 'save_steps': 5_5_0_0, } A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0} @property def __lowerCamelCase ( self ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowerCamelCase ( self ): return f"{self.framework}-transfromers-test" @property def __lowerCamelCase ( self ): return f"./tests/sagemaker/scripts/{self.framework}" @property def __lowerCamelCase ( self ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework )
86
1
"""simple docstring""" from collections import defaultdict class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = total # total no of tasks (N) # DP table will have a dimension of (2^M)*N # initially all values are set to -1 __lowerCAmelCase : Optional[int] = [ [-1 for i in range(total + 1 )] for j in range(2 ** len(_SCREAMING_SNAKE_CASE ) ) ] __lowerCAmelCase : int = defaultdict(_SCREAMING_SNAKE_CASE ) # stores the list of persons for each task # final_mask is used to check if all persons are included by setting all bits # to 1 __lowerCAmelCase : Dict = (1 << len(_SCREAMING_SNAKE_CASE )) - 1 def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # if mask == self.finalmask all persons are distributed tasks, return 1 if mask == self.final_mask: return 1 # if not everyone gets the task and no more tasks are available, return 0 if task_no > self.total_tasks: return 0 # if case already considered if self.dp[mask][task_no] != -1: return self.dp[mask][task_no] # Number of ways when we don't this task in the arrangement __lowerCAmelCase : int = self.count_ways_until(_SCREAMING_SNAKE_CASE , task_no + 1 ) # now assign the tasks one by one to all possible persons and recursively # assign for the remaining tasks. if task_no in self.task: for p in self.task[task_no]: # if p is already given a task if mask & (1 << p): continue # assign this task to p and change the mask value. And recursively # assign tasks with the new mask value. total_ways_util += self.count_ways_until(mask | (1 << p) , task_no + 1 ) # save the value. __lowerCAmelCase : Dict = total_ways_util return self.dp[mask][task_no] def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): # Store the list of persons for each task for i in range(len(_SCREAMING_SNAKE_CASE ) ): for j in task_performed[i]: self.task[j].append(_SCREAMING_SNAKE_CASE ) # call the function to fill the DP table, final answer is stored in dp[0][1] return self.count_ways_until(0 , 1 ) if __name__ == "__main__": lowerCamelCase__ = 5 # total no of tasks (the value of N) # the list of tasks that can be done by M persons. lowerCamelCase__ = [[1, 3, 4], [1, 2, 5], [3, 4]] print( AssignmentUsingBitmask(task_performed, total_tasks).count_no_of_ways( task_performed ) )
86
"""simple docstring""" from __future__ import annotations lowerCamelCase__ = list[tuple[int, int]] lowerCamelCase__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = pos_x __lowerCAmelCase : Optional[Any] = pos_y __lowerCAmelCase : Optional[int] = (pos_y, pos_x) __lowerCAmelCase : Union[str, Any] = goal_x __lowerCAmelCase : Any = goal_y __lowerCAmelCase : Optional[Any] = g_cost __lowerCAmelCase : Any = parent __lowerCAmelCase : Union[str, Any] = self.calculate_heuristic() def __lowerCamelCase ( self ): __lowerCAmelCase : str = abs(self.pos_x - self.goal_x ) __lowerCAmelCase : str = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , _SCREAMING_SNAKE_CASE ): return self.f_cost < other.f_cost class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = [self.start] __lowerCAmelCase : list[Node] = [] __lowerCAmelCase : str = False def __lowerCamelCase ( self ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() __lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: __lowerCAmelCase : Union[str, Any] = True return self.retrace_path(_SCREAMING_SNAKE_CASE ) self.closed_nodes.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: # retrieve the best current path __lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = [] for action in delta: __lowerCAmelCase : Optional[int] = parent.pos_x + action[1] __lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) ) return successors def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = node __lowerCAmelCase : Optional[int] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __lowerCAmelCase : int = current_node.parent path.reverse() return path if __name__ == "__main__": lowerCamelCase__ = (0, 0) lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("""------""") lowerCamelCase__ = GreedyBestFirst(init, goal) lowerCamelCase__ = 
greedy_bf.search() if path: for pos_x, pos_y in path: lowerCamelCase__ = 2 for elem in grid: print(elem)
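The Manhattan-distance heuristic from Node.calculate_heuristic, shown standalone for clarity:

def manhattan(a: tuple[int, int], b: tuple[int, int]) -> int:
    return abs(a[0] - b[0]) + abs(a[1] - b[1])


assert manhattan((0, 0), (6, 6)) == 12  # start to goal on the 7x7 grid above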
86
1
"""simple docstring""" import os import shutil from pathlib import Path from typing import Optional, Union import numpy as np from huggingface_hub import hf_hub_download from ..utils import ONNX_EXTERNAL_WEIGHTS_NAME, ONNX_WEIGHTS_NAME, is_onnx_available, logging if is_onnx_available(): import onnxruntime as ort lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """tensor(bool)""": np.bool_, """tensor(int8)""": np.inta, """tensor(uint8)""": np.uinta, """tensor(int16)""": np.intaa, """tensor(uint16)""": np.uintaa, """tensor(int32)""": np.intaa, """tensor(uint32)""": np.uintaa, """tensor(int64)""": np.intaa, """tensor(uint64)""": np.uintaa, """tensor(float16)""": np.floataa, """tensor(float)""": np.floataa, """tensor(double)""": np.floataa, } class A__ : def __init__( self , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): logger.info('`diffusers.OnnxRuntimeModel` is experimental and might change in the future.' ) __lowerCAmelCase : Optional[int] = model __lowerCAmelCase : List[str] = kwargs.get('model_save_dir' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = kwargs.get('latest_model_name' , _SCREAMING_SNAKE_CASE ) def __call__( self , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = {k: np.array(_SCREAMING_SNAKE_CASE ) for k, v in kwargs.items()} return self.model.run(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCamelCase ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None ): if provider is None: logger.info('No onnxruntime provider specified, using CPUExecutionProvider' ) __lowerCAmelCase : Any = 'CPUExecutionProvider' return ort.InferenceSession(_SCREAMING_SNAKE_CASE , providers=[provider] , sess_options=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME __lowerCAmelCase : Any = self.model_save_dir.joinpath(self.latest_model_name ) __lowerCAmelCase : Union[str, Any] = Path(_SCREAMING_SNAKE_CASE ).joinpath(_SCREAMING_SNAKE_CASE ) try: shutil.copyfile(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) except shutil.SameFileError: pass # copy external weights (for models >2GB) __lowerCAmelCase : int = self.model_save_dir.joinpath(_SCREAMING_SNAKE_CASE ) if src_path.exists(): __lowerCAmelCase : Tuple = Path(_SCREAMING_SNAKE_CASE ).joinpath(_SCREAMING_SNAKE_CASE ) try: shutil.copyfile(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) except shutil.SameFileError: pass def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ): if os.path.isfile(_SCREAMING_SNAKE_CASE ): logger.error(f"Provided path ({save_directory}) should be a directory, not a file" ) return os.makedirs(_SCREAMING_SNAKE_CASE , exist_ok=_SCREAMING_SNAKE_CASE ) # saving model weights/files self._save_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Union[str, Any] = file_name if file_name is not None else ONNX_WEIGHTS_NAME # load model from local directory if os.path.isdir(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = OnnxRuntimeModel.load_model( os.path.join(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE 
) , provider=_SCREAMING_SNAKE_CASE , sess_options=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = Path(_SCREAMING_SNAKE_CASE ) # load model from hub else: # download model __lowerCAmelCase : int = hf_hub_download( repo_id=_SCREAMING_SNAKE_CASE , filename=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Tuple = Path(_SCREAMING_SNAKE_CASE ).parent __lowerCAmelCase : Tuple = Path(_SCREAMING_SNAKE_CASE ).name __lowerCAmelCase : Optional[int] = OnnxRuntimeModel.load_model(_SCREAMING_SNAKE_CASE , provider=_SCREAMING_SNAKE_CASE , sess_options=_SCREAMING_SNAKE_CASE ) return cls(model=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @classmethod def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = None if len(str(_SCREAMING_SNAKE_CASE ).split('@' ) ) == 2: __lowerCAmelCase , __lowerCAmelCase : Any = model_id.split('@' ) return cls._from_pretrained( model_id=_SCREAMING_SNAKE_CASE , revision=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , force_download=_SCREAMING_SNAKE_CASE , use_auth_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
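A hedged usage sketch for the wrapper above; "model.onnx" and the input name are placeholders for any single-input ONNX model on disk.

from pathlib import Path

import numpy as np

session = OnnxRuntimeModel.load_model("model.onnx")  # CPUExecutionProvider by default
wrapped = OnnxRuntimeModel(session, model_save_dir=Path("."), latest_model_name="model.onnx")
outputs = wrapped(sample=np.zeros((1, 3), dtype=np.float32))  # kwargs become the ONNX feed dict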
86
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float(moles / volume ) * nfactor ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((moles * 0.0821 * temperature) / (volume) ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
86
1
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
86
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A__ ( enum.Enum): A_ : List[Any] = 0 A_ : Dict = 1 A_ : Union[str, Any] = 2 @add_end_docstrings(_lowerCamelCase) class A__ ( _lowerCamelCase): A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowerCAmelCase : Any = None if self.model.config.prefix is not None: __lowerCAmelCase : str = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowerCAmelCase : Tuple = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params ) __lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params} __lowerCAmelCase : List[str] = {**self._forward_params, **forward_params} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Optional[int] = {} if prefix is not None: __lowerCAmelCase : Union[str, Any] = prefix if prefix: __lowerCAmelCase : Dict = self.tokenizer( _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ' [None, \'hole\']' ) __lowerCAmelCase : int = handle_long_generation preprocess_params.update(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = generate_kwargs __lowerCAmelCase : List[Any] = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : List[Any] = ReturnType.TENSORS if return_type is not None: __lowerCAmelCase : Optional[Any] = return_type if clean_up_tokenization_spaces is not None: __lowerCAmelCase : Tuple = clean_up_tokenization_spaces if stop_sequence is not None: __lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowerCAmelCase : Optional[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = self.tokenizer( prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : Optional[Any] = prompt_text if handle_long_generation == "hole": __lowerCAmelCase : str = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens'] else: __lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:] return inputs def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = model_inputs['input_ids'] __lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE ) # Allow empty prompts if input_ids.shape[1] == 0: __lowerCAmelCase : Dict = None __lowerCAmelCase : str = None __lowerCAmelCase : Tuple = 1 else: __lowerCAmelCase : Any = input_ids.shape[0] __lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = generated_sequence.shape[0] if self.framework == "pt": __lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ): __lowerCAmelCase : Any = model_outputs['generated_sequence'][0] __lowerCAmelCase : Tuple = model_outputs['input_ids'] __lowerCAmelCase : Any = model_outputs['prompt_text'] __lowerCAmelCase : int = generated_sequence.numpy().tolist() __lowerCAmelCase : Union[str, Any] = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowerCAmelCase : int = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowerCAmelCase : Any = self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowerCAmelCase : Optional[Any] = 0 else: __lowerCAmelCase : Any = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) ) if return_type == ReturnType.FULL_TEXT: __lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:] else: __lowerCAmelCase : int = text[prompt_length:] __lowerCAmelCase : Dict = {'generated_text': all_text} records.append(_SCREAMING_SNAKE_CASE ) return records
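A hedged end-to-end example of the pipeline class above via the public transformers factory; "gpt2" is just a small public checkpoint, downloaded on first use.

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])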
86
1
"""simple docstring""" from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = "arrow" , **_SCREAMING_SNAKE_CASE , ): super().__init__( split=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE , streaming=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : List[str] = load_from_cache_file __lowerCAmelCase : Any = file_format __lowerCAmelCase : Dict = Spark( df=_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , working_dir=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) def __lowerCamelCase ( self ): if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) __lowerCAmelCase : Union[str, Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=_SCREAMING_SNAKE_CASE , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
86
"""simple docstring""" from __future__ import annotations import bisect def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): if hi < 0: __lowerCAmelCase : Tuple = len(_UpperCamelCase ) while lo < hi: __lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __lowerCAmelCase : int = mid + 1 else: __lowerCAmelCase : List[str] = mid return lo def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): if hi < 0: __lowerCAmelCase : List[Any] = len(_UpperCamelCase ) while lo < hi: __lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __lowerCAmelCase : Dict = mid + 1 else: __lowerCAmelCase : str = mid return lo def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = 0 __lowerCAmelCase : int = len(_UpperCamelCase ) - 1 while left <= right: __lowerCAmelCase : List[Any] = left + (right - left) // 2 __lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __lowerCAmelCase : Tuple = midpoint - 1 else: __lowerCAmelCase : str = midpoint + 1 return None def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase ) if index != len(_UpperCamelCase ) and sorted_collection[index] == item: return index return None def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if right < left: return None __lowerCAmelCase : List[str] = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 ) else: return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip() lowerCamelCase__ = sorted(int(item) for item in user_input.split(""",""")) lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n""")) lowerCamelCase__ = binary_search(collection, target) if result is None: print(f'{target} was not found in {collection}.') else: print(f'{target} was found at position {result} in {collection}.')
86
1
"""simple docstring""" import os import time import numpy as np import onnxruntime as ort lowerCamelCase__ = """1""" lowerCamelCase__ = """0""" lowerCamelCase__ = """1""" lowerCamelCase__ = ort.SessionOptions() lowerCamelCase__ = ort.GraphOptimizationLevel.ORT_DISABLE_ALL print("""Create inference session...""") lowerCamelCase__ = ["""TensorrtExecutionProvider""", """CUDAExecutionProvider"""] lowerCamelCase__ = ort.InferenceSession("""model.onnx""", sess_options=sess_opt, providers=execution_provider) lowerCamelCase__ = ort.RunOptions() lowerCamelCase__ = 128 lowerCamelCase__ = 1 lowerCamelCase__ = np.ones((batch, sequence), dtype=np.intaa) lowerCamelCase__ = np.ones((batch, sequence), dtype=np.intaa) lowerCamelCase__ = np.ones((batch, sequence), dtype=np.intaa) print("""Warm up phase...""") sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("""Start inference...""") lowerCamelCase__ = time.time() lowerCamelCase__ = 2_000 lowerCamelCase__ = {} for iter in range(max_iters): lowerCamelCase__ = sess.run( None, { sess.get_inputs()[0].name: input_ids, sess.get_inputs()[1].name: attention_mask, sess.get_inputs()[2].name: token_type_ids, }, run_options=run_opt, ) print("""Average Inference Time = {:.3f} ms""".format((time.time() - start_time) * 1_000 / max_iters))
86
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[str] = 
TFAutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , 
output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
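A minimal sketch of the PyTorch-to-TensorFlow weight round-trip these tests exercise; it uses the public bert-base-uncased checkpoint, downloaded on first run.

from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
pt_model = AutoModel.from_pretrained("bert-base-uncased")
print(type(tf_model).__name__, type(pt_model).__name__)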
86
1
"""simple docstring""" from __future__ import annotations lowerCamelCase__ = list[tuple[int, int]] lowerCamelCase__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = pos_x __lowerCAmelCase : Optional[Any] = pos_y __lowerCAmelCase : Optional[int] = (pos_y, pos_x) __lowerCAmelCase : Union[str, Any] = goal_x __lowerCAmelCase : Any = goal_y __lowerCAmelCase : Optional[Any] = g_cost __lowerCAmelCase : Any = parent __lowerCAmelCase : Union[str, Any] = self.calculate_heuristic() def __lowerCamelCase ( self ): __lowerCAmelCase : str = abs(self.pos_x - self.goal_x ) __lowerCAmelCase : str = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , _SCREAMING_SNAKE_CASE ): return self.f_cost < other.f_cost class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = [self.start] __lowerCAmelCase : list[Node] = [] __lowerCAmelCase : str = False def __lowerCamelCase ( self ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() __lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: __lowerCAmelCase : Union[str, Any] = True return self.retrace_path(_SCREAMING_SNAKE_CASE ) self.closed_nodes.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: # retrieve the best current path __lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = [] for action in delta: __lowerCAmelCase : Optional[int] = parent.pos_x + action[1] __lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) ) return successors def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = node __lowerCAmelCase : Optional[int] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __lowerCAmelCase : int = current_node.parent path.reverse() return path if __name__ == "__main__": lowerCamelCase__ = (0, 0) lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("""------""") lowerCamelCase__ = GreedyBestFirst(init, goal) lowerCamelCase__ = 
greedy_bf.search() if path: for pos_x, pos_y in path: lowerCamelCase__ = 2 for elem in grid: print(elem)
86
"""simple docstring""" from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCamelCase__ = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\") >>> pipe.to(\"cuda\") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save(\"cat.png\") ``` """ def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ): __lowerCAmelCase : Dict = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 __lowerCAmelCase : List[str] = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): super().__init__() self.register_modules( text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if latents is None: __lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) __lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma return latents def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1 # get prompt text embeddings __lowerCAmelCase : Dict = self.tokenizer( _SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) __lowerCAmelCase : Tuple = text_inputs.input_ids __lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): 
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) __lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder( input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase : List[str] if negative_prompt is None: __lowerCAmelCase : Union[str, Any] = [''] * batch_size elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !=" f" {type(_SCREAMING_SNAKE_CASE )}." ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" ' the batch size of `prompt`.' ) else: __lowerCAmelCase : Optional[int] = negative_prompt __lowerCAmelCase : Tuple = self.tokenizer( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) __lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder( input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1] __lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1] __lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 ) __lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 ) __lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] ) __lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) __lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) __lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" ) __lowerCAmelCase : List[Any] = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ): if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) __lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowerCAmelCase : Any = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: __lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE ) if self.safety_checker is not None: __lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE ) # We'll offload the last model manually. 
__lowerCAmelCase : Optional[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowerCamelCase ( self ): if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[str] = 1 elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" ) __lowerCAmelCase : Dict = self._execution_device __lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt __lowerCAmelCase : Optional[int] = guidance_scale > 1.0 __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE ) self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = self.scheduler.timesteps __lowerCAmelCase : int = self.unet.config.in_channels __lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor ) # create initial latent __lowerCAmelCase : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , ) for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance __lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds} __lowerCAmelCase : Optional[Any] = self.unet( sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0] if do_classifier_free_guidance: __lowerCAmelCase , __lowerCAmelCase : 
Dict = noise_pred.split(latents.shape[1] , dim=1 ) __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 ) __lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 ) __lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCAmelCase : List[str] = self.scheduler.step( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample # post-processing __lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: __lowerCAmelCase : List[str] = image * 0.5 + 0.5 __lowerCAmelCase : Dict = image.clamp(0 , 1 ) __lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
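A numeric illustration of the classifier-free guidance mix computed above, guided = uncond + guidance_scale * (text - uncond):

import torch

uncond = torch.tensor([0.0])
text = torch.tensor([1.0])
guided = uncond + 4.0 * (text - uncond)
assert guided.item() == 4.0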
86
1
"""simple docstring""" # flake8: noqa # Lint as: python3 lowerCamelCase__ = [ """VerificationMode""", """Version""", """disable_progress_bar""", """enable_progress_bar""", """is_progress_bar_enabled""", """experimental""", ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
86
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Union[str, Any] = BarthezTokenizer A_ : Tuple = BarthezTokenizerFast A_ : Dict = True A_ : List[str] = True def __lowerCamelCase ( self ): super().setUp() __lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = tokenizer def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = '<pad>' __lowerCAmelCase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 ) def __lowerCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 ) @require_torch def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2] __lowerCAmelCase : Optional[int] = self.tokenizer( _SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) __lowerCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): if not self.test_rust_tokenizer: return __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.' 
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # fmt: off __lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. __lowerCAmelCase : Union[str, Any] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
86
1
"""simple docstring""" import copy import inspect import unittest from transformers import AutoBackbone from transformers.configuration_utils import PretrainedConfig from transformers.testing_utils import require_timm, require_torch, torch_device from transformers.utils.import_utils import is_torch_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor if is_torch_available(): import torch from transformers import TimmBackbone, TimmBackboneConfig from ...test_pipeline_mixin import PipelineTesterMixin class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="resnet50" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , ): __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Optional[int] = out_indices if out_indices is not None else [4] __lowerCAmelCase : Optional[int] = stage_names __lowerCAmelCase : Optional[Any] = out_features __lowerCAmelCase : Any = backbone __lowerCAmelCase : List[Any] = batch_size __lowerCAmelCase : List[str] = image_size __lowerCAmelCase : str = num_channels __lowerCAmelCase : int = use_pretrained_backbone __lowerCAmelCase : Optional[Any] = is_training def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __lowerCAmelCase : str = self.get_config() return config, pixel_values def __lowerCamelCase ( self ): return TimmBackboneConfig( image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = TimmBackbone(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() with torch.no_grad(): __lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual( result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = config_and_inputs __lowerCAmelCase : Union[str, Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch @require_timm class A__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : Tuple = (TimmBackbone,) if is_torch_available() else () A_ : Dict = {'feature-extraction': TimmBackbone} if is_torch_available() else {} A_ : Optional[int] = False A_ : List[str] = False A_ : str = False A_ : Dict = False def __lowerCamelCase ( self ): __lowerCAmelCase : int = TimmBackboneModelTester(self ) __lowerCAmelCase : List[str] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def 
__lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = 'resnet18' __lowerCAmelCase : Optional[int] = 'microsoft/resnet-18' __lowerCAmelCase : Optional[Any] = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) # Out indices are set to the last layer by default. For timm models, we don't know # the number of layers in advance, so we set it to (-1,), whereas for transformers # models, we set it to [len(stage_names) - 1] (kept for backward compatibility). self.assertEqual(timm_model.out_indices , (-1,) ) self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] ) __lowerCAmelCase : int = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , use_timm_backbone=_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) __lowerCAmelCase : Optional[int] = AutoBackbone.from_pretrained(_SCREAMING_SNAKE_CASE , out_indices=[1, 2, 3] ) self.assertEqual(timm_model.out_indices , transformers_model.out_indices ) self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) ) self.assertEqual(timm_model.channels , transformers_model.channels ) @unittest.skip('TimmBackbone doesn\'t support feed forward chunking' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t have num_hidden_layers attribute' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone initialization is managed on the timm side' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone models doesn\'t have inputs_embeds' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone model cannot be created without specifying a backbone checkpoint' ) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __lowerCamelCase ( self ): pass @unittest.skip('model weights aren\'t tied in TimmBackbone.' ) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('Only checkpoints on timm can be loaded into TimmBackbone' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t have hidden size info in its configuration.' ) def __lowerCamelCase ( self ): pass @unittest.skip('TimmBackbone doesn\'t support output_attentions.' ) def __lowerCamelCase ( self ): pass @unittest.skip('Safetensors is not supported by timm.' ) def __lowerCamelCase ( self ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __lowerCAmelCase : List[str] = [*signature.parameters.keys()] __lowerCAmelCase : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Any = True __lowerCAmelCase : Optional[Any] = self.has_attentions # no need to test all models as different heads yield the same functionality __lowerCAmelCase : Any = self.all_model_classes[0] __lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = model(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = outputs[0][-1] # Encoder-/Decoder-only models __lowerCAmelCase : Dict = outputs.hidden_states[0] hidden_states.retain_grad() if self.has_attentions: __lowerCAmelCase : Tuple = outputs.attentions[0] attentions.retain_grad() output.flatten()[0].backward(retain_graph=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(hidden_states.grad ) if self.has_attentions: self.assertIsNotNone(attentions.grad ) def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __lowerCAmelCase : Optional[Any] = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[int] = model(**_SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) ) self.assertEqual(len(model.channels ) , len(config.out_indices ) ) # Check output of last stage is taken if out_features=None, out_indices=None __lowerCAmelCase : Optional[int] = copy.deepcopy(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = None __lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : int = model(**_SCREAMING_SNAKE_CASE ) self.assertEqual(len(result.feature_maps ) , 1 ) self.assertEqual(len(model.channels ) , 1 ) # Check backbone can be initialized with fresh weights __lowerCAmelCase : List[Any] = copy.deepcopy(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = False __lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Tuple = model(**_SCREAMING_SNAKE_CASE )
86
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class A__ ( _lowerCamelCase): A_ : Optional[int] = 'poolformer' def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = num_channels __lowerCAmelCase : str = patch_size __lowerCAmelCase : Optional[Any] = stride __lowerCAmelCase : Optional[int] = padding __lowerCAmelCase : List[Any] = pool_size __lowerCAmelCase : int = hidden_sizes __lowerCAmelCase : str = mlp_ratio __lowerCAmelCase : Optional[int] = depths __lowerCAmelCase : str = patch_sizes __lowerCAmelCase : str = strides __lowerCAmelCase : Optional[int] = num_encoder_blocks __lowerCAmelCase : Any = drop_path_rate __lowerCAmelCase : Any = hidden_act __lowerCAmelCase : Dict = use_layer_scale __lowerCAmelCase : Union[str, Any] = layer_scale_init_value __lowerCAmelCase : Dict = initializer_range super().__init__(**_SCREAMING_SNAKE_CASE ) class A__ ( _lowerCamelCase): A_ : List[str] = version.parse('1.11') @property def __lowerCamelCase ( self ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowerCamelCase ( self ): return 2E-3
86
1
"""simple docstring""" from __future__ import annotations from math import pi, sqrt def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): if inductance <= 0: raise ValueError('Inductance cannot be 0 or negative' ) elif capacitance <= 0: raise ValueError('Capacitance cannot be 0 or negative' ) else: return ( "Resonant frequency", float(1 / (2 * pi * (sqrt(inductance * capacitance ))) ), ) if __name__ == "__main__": import doctest doctest.testmod()
86
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Union[str, Any] = DiTPipeline A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS A_ : List[Any] = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS A_ : Tuple = False def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : List[str] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : str = AutoencoderKL() __lowerCAmelCase : Union[str, Any] = DDIMScheduler() __lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ): if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = 'cpu' __lowerCAmelCase : Any = self.get_dummy_components() __lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images __lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) __lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) __lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 ) def __lowerCamelCase ( self ): self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCamelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = torch.manual_seed(0 ) __lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) 
pipe.to('cuda' ) __lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf'] __lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCamelCase ( self ): __lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) __lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) __lowerCAmelCase : Dict = ['vase', 'umbrella'] __lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max() ) < 1E-1
86
1
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Union[str, Any] = BarthezTokenizer A_ : Tuple = BarthezTokenizerFast A_ : Dict = True A_ : List[str] = True def __lowerCamelCase ( self ): super().setUp() __lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = tokenizer def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = '<pad>' __lowerCAmelCase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 ) def __lowerCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 ) @require_torch def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2] __lowerCAmelCase : Optional[int] = self.tokenizer( _SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) __lowerCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): if not self.test_rust_tokenizer: return __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.' 
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # fmt: off __lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. __lowerCAmelCase : Union[str, Any] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
86
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A__ ( _lowerCamelCase , unittest.TestCase): A_ : str = ShapEImgaImgPipeline A_ : str = ['image'] A_ : int = ['image'] A_ : Tuple = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] A_ : Tuple = False @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return self.time_input_dim * 4 @property def __lowerCamelCase ( self ): return 8 @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Any = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): __lowerCAmelCase : Any = CLIPImageProcessor( crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , ) return image_processor @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Dict = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE ) return model def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.dummy_prior __lowerCAmelCase : List[Any] = self.dummy_image_encoder __lowerCAmelCase : int = self.dummy_image_processor __lowerCAmelCase : Any = self.dummy_renderer __lowerCAmelCase : Any = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , ) __lowerCAmelCase : Tuple = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE=0 ): __lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : str = 'cpu' __lowerCAmelCase : Dict = self.get_dummy_components() __lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Any = output.images[0] __lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCAmelCase : List[Any] = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = torch_device == 'cpu' __lowerCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.get_dummy_components() __lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : List[str] = 2 __lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) for key in inputs.keys(): if key in self.batch_params: __lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]] __lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) __lowerCAmelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) __lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) __lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) __lowerCAmelCase : int = pipe( _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) 
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
86
1
"""simple docstring""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""", """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""", """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } lowerCamelCase__ = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): for attribute in key.split('.' ): __lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase ) if weight_type is not None: __lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape else: __lowerCAmelCase : Dict = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "weight_g": __lowerCAmelCase : List[Any] = value elif weight_type == "weight_v": __lowerCAmelCase : Any = value elif weight_type == "bias": __lowerCAmelCase : List[str] = value else: __lowerCAmelCase : List[Any] = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Any = [] __lowerCAmelCase : Optional[int] = fairseq_model.state_dict() __lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , ) __lowerCAmelCase : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: __lowerCAmelCase : int = True if "*" in mapped_key: __lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2] __lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase ) if "weight_g" in name: __lowerCAmelCase : Union[str, Any] = 'weight_g' elif "weight_v" in name: __lowerCAmelCase : int = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __lowerCAmelCase : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase : List[str] = 'weight' else: __lowerCAmelCase : Optional[Any] = None set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1] __lowerCAmelCase : Any = name.split('.' ) __lowerCAmelCase : List[Any] = int(items[0] ) __lowerCAmelCase : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowerCAmelCase : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowerCAmelCase : int = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __lowerCAmelCase : Optional[Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __lowerCAmelCase : Any = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ): # load the pre-trained checkpoints __lowerCAmelCase : Any = torch.load(_UpperCamelCase ) __lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] ) __lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase ) else: __lowerCAmelCase : List[str] = WavLMConfig() __lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase ) recursively_load_weights(_UpperCamelCase , _UpperCamelCase ) hf_wavlm.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
86
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
86
1
"""simple docstring""" import argparse import torch from transformers import YosoConfig, YosoForMaskedLM def __lowerCAmelCase (_UpperCamelCase ): if "model" in orig_key: __lowerCAmelCase : str = orig_key.replace('model.' , '' ) if "norm1" in orig_key: __lowerCAmelCase : List[str] = orig_key.replace('norm1' , 'attention.output.LayerNorm' ) if "norm2" in orig_key: __lowerCAmelCase : Tuple = orig_key.replace('norm2' , 'output.LayerNorm' ) if "norm" in orig_key: __lowerCAmelCase : Tuple = orig_key.replace('norm' , 'LayerNorm' ) if "transformer" in orig_key: __lowerCAmelCase : Dict = orig_key.split('.' )[0].split('_' )[-1] __lowerCAmelCase : Any = orig_key.replace(F"transformer_{layer_num}" , F"encoder.layer.{layer_num}" ) if "mha.attn" in orig_key: __lowerCAmelCase : Union[str, Any] = orig_key.replace('mha.attn' , 'attention.self' ) if "mha" in orig_key: __lowerCAmelCase : Tuple = orig_key.replace('mha' , 'attention' ) if "W_q" in orig_key: __lowerCAmelCase : Union[str, Any] = orig_key.replace('W_q' , 'self.query' ) if "W_k" in orig_key: __lowerCAmelCase : Union[str, Any] = orig_key.replace('W_k' , 'self.key' ) if "W_v" in orig_key: __lowerCAmelCase : List[str] = orig_key.replace('W_v' , 'self.value' ) if "ff1" in orig_key: __lowerCAmelCase : Any = orig_key.replace('ff1' , 'intermediate.dense' ) if "ff2" in orig_key: __lowerCAmelCase : List[Any] = orig_key.replace('ff2' , 'output.dense' ) if "ff" in orig_key: __lowerCAmelCase : Any = orig_key.replace('ff' , 'output.dense' ) if "mlm_class" in orig_key: __lowerCAmelCase : Dict = orig_key.replace('mlm.mlm_class' , 'cls.predictions.decoder' ) if "mlm" in orig_key: __lowerCAmelCase : Optional[Any] = orig_key.replace('mlm' , 'cls.predictions.transform' ) if "cls" not in orig_key: __lowerCAmelCase : str = 'yoso.' + orig_key return orig_key def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): for key in orig_state_dict.copy().keys(): __lowerCAmelCase : Optional[Any] = orig_state_dict.pop(_UpperCamelCase ) if ("pooler" in key) or ("sen_class" in key): continue else: __lowerCAmelCase : Tuple = val __lowerCAmelCase : Tuple = orig_state_dict['cls.predictions.decoder.bias'] __lowerCAmelCase : List[Any] = torch.arange(_UpperCamelCase ).expand((1, -1) ) + 2 return orig_state_dict def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : int = torch.load(_UpperCamelCase , map_location='cpu' )['model_state_dict'] __lowerCAmelCase : List[str] = YosoConfig.from_json_file(_UpperCamelCase ) __lowerCAmelCase : Optional[Any] = YosoForMaskedLM(_UpperCamelCase ) __lowerCAmelCase : Optional[int] = convert_checkpoint_helper(config.max_position_embeddings , _UpperCamelCase ) print(model.load_state_dict(_UpperCamelCase ) ) model.eval() model.save_pretrained(_UpperCamelCase ) print(F"Checkpoint successfuly converted. Model saved at {pytorch_dump_path}" ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--pytorch_model_path""", default=None, type=str, required=True, help="""Path to YOSO pytorch checkpoint.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The json file for YOSO model config.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowerCamelCase__ = parser.parse_args() convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
86
"""simple docstring""" import math import sys def __lowerCAmelCase (_UpperCamelCase ): if number != int(_UpperCamelCase ): raise ValueError('the value of input must be a natural number' ) if number < 0: raise ValueError('the value of input must not be a negative number' ) if number == 0: return 1 __lowerCAmelCase : Any = [-1] * (number + 1) __lowerCAmelCase : List[Any] = 0 for i in range(1 , number + 1 ): __lowerCAmelCase : List[Any] = sys.maxsize __lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) ) for j in range(1 , root + 1 ): __lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)] __lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase : List[str] = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
86
1
"""simple docstring""" import math import os import sys def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : str = '' try: with open(_UpperCamelCase , 'rb' ) as binary_file: __lowerCAmelCase : List[str] = binary_file.read() for dat in data: __lowerCAmelCase : Optional[Any] = F"{dat:08b}" result += curr_byte return result except OSError: print('File not accessible' ) sys.exit() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): lexicon.pop(_UpperCamelCase ) __lowerCAmelCase : Tuple = last_match_id if math.loga(_UpperCamelCase ).is_integer(): for curr_key in lexicon: __lowerCAmelCase : Dict = '0' + lexicon[curr_key] __lowerCAmelCase : str = bin(_UpperCamelCase )[2:] def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Optional[int] = {'0': '0', '1': '1'} __lowerCAmelCase , __lowerCAmelCase : List[Any] = '', '' __lowerCAmelCase : str = len(_UpperCamelCase ) for i in range(len(_UpperCamelCase ) ): curr_string += data_bits[i] if curr_string not in lexicon: continue __lowerCAmelCase : List[str] = lexicon[curr_string] result += last_match_id add_key_to_lexicon(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) index += 1 __lowerCAmelCase : Dict = '' while curr_string != "" and curr_string not in lexicon: curr_string += "0" if curr_string != "": __lowerCAmelCase : List[str] = lexicon[curr_string] result += last_match_id return result def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Optional[Any] = os.path.getsize(_UpperCamelCase ) __lowerCAmelCase : List[Any] = bin(_UpperCamelCase )[2:] __lowerCAmelCase : Tuple = len(_UpperCamelCase ) return "0" * (length_length - 1) + file_length_binary + compressed def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = 8 try: with open(_UpperCamelCase , 'wb' ) as opened_file: __lowerCAmelCase : str = [ to_write[i : i + byte_length] for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase ) ] if len(result_byte_array[-1] ) % byte_length == 0: result_byte_array.append('10000000' ) else: result_byte_array[-1] += "1" + "0" * ( byte_length - len(result_byte_array[-1] ) - 1 ) for elem in result_byte_array: opened_file.write(int(_UpperCamelCase , 2 ).to_bytes(1 , byteorder='big' ) ) except OSError: print('File not accessible' ) sys.exit() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Any = read_file_binary(_UpperCamelCase ) __lowerCAmelCase : int = compress_data(_UpperCamelCase ) __lowerCAmelCase : Any = add_file_length(_UpperCamelCase , _UpperCamelCase ) write_file_binary(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": compress(sys.argv[1], sys.argv[2])
86
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ): __lowerCAmelCase : Union[str, Any] = parent __lowerCAmelCase : Any = batch_size __lowerCAmelCase : Any = seq_length __lowerCAmelCase : Optional[Any] = is_training __lowerCAmelCase : Any = use_input_mask __lowerCAmelCase : Any = use_token_type_ids __lowerCAmelCase : Tuple = use_labels __lowerCAmelCase : Optional[Any] = vocab_size __lowerCAmelCase : Tuple = hidden_size __lowerCAmelCase : str = rotary_dim __lowerCAmelCase : Union[str, Any] = num_hidden_layers __lowerCAmelCase : Union[str, Any] = num_attention_heads __lowerCAmelCase : int = intermediate_size __lowerCAmelCase : List[str] = hidden_act __lowerCAmelCase : int = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : List[Any] = max_position_embeddings __lowerCAmelCase : Optional[Any] = initializer_range __lowerCAmelCase : Tuple = None __lowerCAmelCase : int = vocab_size - 1 __lowerCAmelCase : Dict = vocab_size - 1 __lowerCAmelCase : int = vocab_size - 1 def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : List[str] = None if self.use_input_mask: __lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Optional[int] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs __lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[str] = 20 __lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , 
_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __lowerCAmelCase : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCAmelCase : Any = model( input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCAmelCase : int = model( input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = 20 __lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCAmelCase : Optional[Any] = model( input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCAmelCase : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) @require_flax class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else () def __lowerCamelCase ( self ): __lowerCAmelCase : int = FlaxGPTJModelTester(self ) def __lowerCamelCase ( self ): for model_class_name in self.all_model_classes: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): for model_class_name in self.all_model_classes: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @tooslow def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , 
return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCAmelCase : Any = False __lowerCAmelCase : Any = model.config.eos_token_id __lowerCAmelCase : Union[str, Any] = jax.jit(model.generate ) __lowerCAmelCase : Optional[Any] = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @is_pt_flax_cross_test def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape __lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = 0 __lowerCAmelCase : Tuple = 1 __lowerCAmelCase : List[str] = 0 __lowerCAmelCase : Any = 1 __lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval() __lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) __lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = fx_state with torch.no_grad(): __lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple() __lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual( len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCAmelCase : List[str] = 
self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval() __lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) __lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params ) __lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape __lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = 0 __lowerCAmelCase : Optional[Any] = 1 __lowerCAmelCase : Optional[int] = 0 __lowerCAmelCase : Optional[Any] = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple() __lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE ) with torch.no_grad(): __lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual( len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def __lowerCamelCase ( self ): for model_class_name in self.all_model_classes: __lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
86
1
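Below is a minimal, self-contained sketch of the invariant the cached-decoding test above checks, written against a tiny randomly initialised GPT-J (all hyperparameter values are arbitrary stand-ins; it assumes `jax`, `flax` and `transformers` are available): the last-token logits from one full forward pass must match those obtained by priming the key/value cache on the prefix and then feeding only the final token.

```python
# Hedged sketch: tiny random-weight model, toy shapes; not the library's test itself.
import jax.numpy as jnp
import numpy as np
from transformers import GPTJConfig
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM

config = GPTJConfig(vocab_size=99, n_embd=32, n_layer=2, n_head=4, n_positions=64, rotary_dim=4)
model = FlaxGPTJForCausalLM(config)

input_ids = jnp.ones((1, 8), dtype="i4")
max_length = 16

# Reference: one uncached forward pass over the whole sequence.
full_logits = model(input_ids).logits

# Prime the cache on everything except the last token...
cache = model.init_cache(batch_size=1, max_length=max_length)
attention_mask = jnp.ones((1, max_length), dtype="i4")
position_ids = jnp.arange(input_ids.shape[-1] - 1)[None, :]
primed = model(
    input_ids[:, :-1],
    attention_mask=attention_mask,
    past_key_values=cache,
    position_ids=position_ids,
)

# ...then decode the final token alone, reusing the cached keys/values.
last_position = jnp.array([[input_ids.shape[-1] - 1]], dtype="i4")
step = model(
    input_ids[:, -1:],
    attention_mask=attention_mask,
    past_key_values=primed.past_key_values,
    position_ids=last_position,
)

diff = np.max(np.abs(np.asarray(step.logits[:, -1] - full_logits[:, -1])))
assert diff < 1e-3, f"cache/no-cache mismatch: {diff}"
```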
"""simple docstring""" from typing import List, Optional import numpy as np from ...processing_utils import ProcessorMixin from ...utils import to_numpy class A__ ( _lowerCamelCase): A_ : str = 'EncodecFeatureExtractor' A_ : Any = ('T5Tokenizer', 'T5TokenizerFast') def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = self.feature_extractor __lowerCAmelCase : List[str] = False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True ): return self.tokenizer.get_decoder_prompt_ids(task=_SCREAMING_SNAKE_CASE , language=_SCREAMING_SNAKE_CASE , no_timestamps=_SCREAMING_SNAKE_CASE ) def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = kwargs.pop('audio' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = kwargs.pop('sampling_rate' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = kwargs.pop('text' , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: __lowerCAmelCase : str = args[0] __lowerCAmelCase : str = args[1:] if audio is None and text is None: raise ValueError('You need to specify either an `audio` or `text` input to process.' ) if text is not None: __lowerCAmelCase : List[str] = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if audio is not None: __lowerCAmelCase : List[str] = self.feature_extractor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , sampling_rate=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if audio is None: return inputs elif text is None: return audio_inputs else: __lowerCAmelCase : List[str] = audio_inputs['input_values'] if "padding_mask" in audio_inputs: __lowerCAmelCase : int = audio_inputs['padding_mask'] return inputs def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = kwargs.pop('audio' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = kwargs.pop('padding_mask' , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: __lowerCAmelCase : Optional[Any] = args[0] __lowerCAmelCase : Optional[int] = args[1:] if audio_values is not None: return self._decode_audio(_SCREAMING_SNAKE_CASE , padding_mask=_SCREAMING_SNAKE_CASE ) else: return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): __lowerCAmelCase : int = to_numpy(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = audio_values.shape if padding_mask is None: return list(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = to_numpy(_SCREAMING_SNAKE_CASE ) # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding** # token (so that the generated audio values are **not** treated as padded tokens) __lowerCAmelCase : str = seq_len - padding_mask.shape[-1] __lowerCAmelCase : Union[str, Any] = 1 - self.feature_extractor.padding_value __lowerCAmelCase : Dict = np.pad(_SCREAMING_SNAKE_CASE , ((0, 0), (0, difference)) , 'constant' , constant_values=_SCREAMING_SNAKE_CASE ) 
__lowerCAmelCase : int = audio_values.tolist() for i in range(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = np.asarray(audio_values[i] )[ padding_mask[i][None, :] != self.feature_extractor.padding_value ] __lowerCAmelCase : Any = sliced_audio.reshape(_SCREAMING_SNAKE_CASE , -1 ) return audio_values
86
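The `_decode_audio` method above relies on a subtle padding-mask trick; here is a NumPy-only toy sketch of it (the shapes and the padding value of 0.0 are assumptions, not taken from a real feature extractor): the mask is extended with the non-padding value so freshly generated samples are kept, then each waveform is sliced back to its unpadded length.

```python
# Toy data only: 2 waveforms, 1 channel, 12 generated samples each.
import numpy as np

padding_value = 0.0                                          # assumed padding token
audio_values = np.random.randn(2, 1, 12)                     # (batch, channels, seq_len)
padding_mask = np.array([[1.0] * 8, [1.0] * 5 + [0.0] * 3])  # pre-generation masks (len 8)

# Extend the mask with the *non*-padding value so new samples survive the slice.
difference = audio_values.shape[-1] - padding_mask.shape[-1]
padding_mask = np.pad(
    padding_mask, ((0, 0), (0, difference)), "constant", constant_values=1 - padding_value
)

trimmed = []
for i in range(audio_values.shape[0]):
    keep = padding_mask[i] != padding_value                  # 1-D boolean mask over time
    trimmed.append(audio_values[i][:, keep])                 # (channels, true_length)

assert trimmed[0].shape == (1, 12) and trimmed[1].shape == (1, 9)
```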
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : Tuple = parent __lowerCAmelCase : Optional[int] = 13 __lowerCAmelCase : List[Any] = 7 __lowerCAmelCase : int = True __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : Optional[Any] = 99 __lowerCAmelCase : int = 3_84 __lowerCAmelCase : Union[str, Any] = 2 __lowerCAmelCase : Tuple = 4 __lowerCAmelCase : str = 37 __lowerCAmelCase : Any = 'gelu' __lowerCAmelCase : List[str] = 0.1 __lowerCAmelCase : Any = 0.1 __lowerCAmelCase : Union[str, Any] = 5_12 __lowerCAmelCase : int = 16 __lowerCAmelCase : Union[str, Any] = 2 __lowerCAmelCase : int = 0.02 __lowerCAmelCase : Dict = 3 __lowerCAmelCase : Tuple = 4 __lowerCAmelCase : Tuple = 1_28 __lowerCAmelCase : Optional[int] = 2 __lowerCAmelCase : List[str] = 9 __lowerCAmelCase : int = 1 __lowerCAmelCase : int = None def __lowerCamelCase ( self ): __lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Optional[int] = None if self.use_input_mask: __lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Tuple = None if self.use_token_type_ids: __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Optional[Any] = None __lowerCAmelCase : Dict = None __lowerCAmelCase : Union[str, Any] = None if self.use_labels: __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : Union[str, Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE 
, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowerCAmelCase : Tuple = [input_ids, input_mask] __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = self.num_labels __lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = self.num_choices __lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase : Tuple = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = self.num_labels __lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def 
__lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) : List[str] = config_and_inputs __lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : List[str] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : str = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : List[Any] = False A_ : str = False A_ : List[Any] = False def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = TFConvBertModelTester(self ) __lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = True if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ): __lowerCAmelCase : int = True __lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , 
self.model_tester.seq_length ) __lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' ) __lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE ) if self.is_encoder_decoder: __lowerCAmelCase : List[str] = outputs['encoder_hidden_states'] __lowerCAmelCase : Tuple = outputs['encoder_attentions'] else: __lowerCAmelCase : Optional[int] = outputs['hidden_states'] __lowerCAmelCase : Tuple = outputs['attentions'] self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE ) def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) self.assertEqual(out_len % 2 , 0 ) __lowerCAmelCase : Optional[Any] = outputs.decoder_attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowerCAmelCase : List[str] = True __lowerCAmelCase : Optional[int] = False __lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = 
model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ) if self.is_encoder_decoder: __lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE ) check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine __lowerCAmelCase : Dict = True __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) ) self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ) @require_tf class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0] __lowerCAmelCase : Tuple = [1, 6, 7_68] self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
86
1
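The multiple-choice check above turns every `(batch, seq_len)` tensor into `(batch, num_choices, seq_len)` with `expand_dims` followed by `tile`; the standalone sketch below isolates that reshaping step with toy sizes (TensorFlow assumed available).

```python
import tensorflow as tf

batch_size, seq_length, num_choices = 2, 7, 4
input_ids = tf.random.uniform((batch_size, seq_length), maxval=99, dtype=tf.int32)

# Insert a choice axis, then repeat the sequence once per candidate answer.
multiple_choice_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))

assert multiple_choice_ids.shape == (batch_size, num_choices, seq_length)
# Every choice row is an identical copy of the original sequence.
assert bool(tf.reduce_all(multiple_choice_ids[:, 0, :] == multiple_choice_ids[:, -1, :]))
```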
"""simple docstring""" from typing import Optional, Tuple, Union import flax import flax.linen as nn import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict from ..configuration_utils import ConfigMixin, flax_register_to_config from ..utils import BaseOutput from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps from .modeling_flax_utils import FlaxModelMixin from .unet_ad_blocks_flax import ( FlaxCrossAttnDownBlockaD, FlaxCrossAttnUpBlockaD, FlaxDownBlockaD, FlaxUNetMidBlockaDCrossAttn, FlaxUpBlockaD, ) @flax.struct.dataclass class A__ ( _lowerCamelCase): A_ : jnp.ndarray @flax_register_to_config class A__ ( nn.Module , _lowerCamelCase , _lowerCamelCase): A_ : int = 3_2 A_ : int = 4 A_ : int = 4 A_ : Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) A_ : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") A_ : Union[bool, Tuple[bool]] = False A_ : Tuple[int] = (3_2_0, 6_4_0, 1_2_8_0, 1_2_8_0) A_ : int = 2 A_ : Union[int, Tuple[int]] = 8 A_ : Optional[Union[int, Tuple[int]]] = None A_ : int = 1_2_8_0 A_ : float = 0.0 A_ : bool = False A_ : jnp.dtype = jnp.floataa A_ : bool = True A_ : int = 0 A_ : bool = False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): # init input tensors __lowerCAmelCase : Optional[Any] = (1, self.in_channels, self.sample_size, self.sample_size) __lowerCAmelCase : int = jnp.zeros(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) __lowerCAmelCase : str = jnp.ones((1,) , dtype=jnp.intaa ) __lowerCAmelCase : Any = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa ) __lowerCAmelCase , __lowerCAmelCase : List[str] = jax.random.split(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = {'params': params_rng, 'dropout': dropout_rng} return self.init(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )["params"] def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.block_out_channels __lowerCAmelCase : List[str] = block_out_channels[0] * 4 if self.num_attention_heads is not None: raise ValueError( 'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' ) # If `num_attention_heads` is not defined (which is the case for most models) # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. # The reason for this behavior is to correct for incorrectly named variables that were introduced # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking # which is why we correct for the naming here. 
__lowerCAmelCase : Dict = self.num_attention_heads or self.attention_head_dim # input __lowerCAmelCase : Union[str, Any] = nn.Conv( block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) # time __lowerCAmelCase : Union[str, Any] = FlaxTimesteps( block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift ) __lowerCAmelCase : Optional[int] = FlaxTimestepEmbedding(_SCREAMING_SNAKE_CASE , dtype=self.dtype ) __lowerCAmelCase : Optional[Any] = self.only_cross_attention if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = (only_cross_attention,) * len(self.down_block_types ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = (num_attention_heads,) * len(self.down_block_types ) # down __lowerCAmelCase : str = [] __lowerCAmelCase : Optional[int] = block_out_channels[0] for i, down_block_type in enumerate(self.down_block_types ): __lowerCAmelCase : Optional[Any] = output_channel __lowerCAmelCase : Dict = block_out_channels[i] __lowerCAmelCase : Optional[int] = i == len(_SCREAMING_SNAKE_CASE ) - 1 if down_block_type == "CrossAttnDownBlock2D": __lowerCAmelCase : Any = FlaxCrossAttnDownBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: __lowerCAmelCase : List[Any] = FlaxDownBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , ) down_blocks.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = down_blocks # mid __lowerCAmelCase : List[str] = FlaxUNetMidBlockaDCrossAttn( in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) # up __lowerCAmelCase : Union[str, Any] = [] __lowerCAmelCase : int = list(reversed(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Dict = list(reversed(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Any = list(reversed(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Optional[int] = reversed_block_out_channels[0] for i, up_block_type in enumerate(self.up_block_types ): __lowerCAmelCase : Optional[int] = output_channel __lowerCAmelCase : Any = reversed_block_out_channels[i] __lowerCAmelCase : Optional[Any] = reversed_block_out_channels[min(i + 1 , len(_SCREAMING_SNAKE_CASE ) - 1 )] __lowerCAmelCase : Optional[int] = i == len(_SCREAMING_SNAKE_CASE ) - 1 if up_block_type == "CrossAttnUpBlock2D": __lowerCAmelCase : Union[str, Any] = FlaxCrossAttnUpBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , prev_output_channel=_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , ) else: 
__lowerCAmelCase : Tuple = FlaxUpBlockaD( in_channels=_SCREAMING_SNAKE_CASE , out_channels=_SCREAMING_SNAKE_CASE , prev_output_channel=_SCREAMING_SNAKE_CASE , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , ) up_blocks.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = output_channel __lowerCAmelCase : int = up_blocks # out __lowerCAmelCase : Tuple = nn.GroupNorm(num_groups=32 , epsilon=1E-5 ) __lowerCAmelCase : Optional[Any] = nn.Conv( self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , ) def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , ): # 1. time if not isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ): __lowerCAmelCase : Optional[Any] = jnp.array([timesteps] , dtype=jnp.intaa ) elif isinstance(_SCREAMING_SNAKE_CASE , jnp.ndarray ) and len(timesteps.shape ) == 0: __lowerCAmelCase : Optional[int] = timesteps.astype(dtype=jnp.floataa ) __lowerCAmelCase : List[str] = jnp.expand_dims(_SCREAMING_SNAKE_CASE , 0 ) __lowerCAmelCase : Dict = self.time_proj(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = self.time_embedding(_SCREAMING_SNAKE_CASE ) # 2. pre-process __lowerCAmelCase : Union[str, Any] = jnp.transpose(_SCREAMING_SNAKE_CASE , (0, 2, 3, 1) ) __lowerCAmelCase : Union[str, Any] = self.conv_in(_SCREAMING_SNAKE_CASE ) # 3. down __lowerCAmelCase : str = (sample,) for down_block in self.down_blocks: if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase , __lowerCAmelCase : Optional[int] = down_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) else: __lowerCAmelCase , __lowerCAmelCase : Tuple = down_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) down_block_res_samples += res_samples if down_block_additional_residuals is not None: __lowerCAmelCase : str = () for down_block_res_sample, down_block_additional_residual in zip( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): down_block_res_sample += down_block_additional_residual new_down_block_res_samples += (down_block_res_sample,) __lowerCAmelCase : Any = new_down_block_res_samples # 4. mid __lowerCAmelCase : Union[str, Any] = self.mid_block(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=not train ) if mid_block_additional_residual is not None: sample += mid_block_additional_residual # 5. up for up_block in self.up_blocks: __lowerCAmelCase : Dict = down_block_res_samples[-(self.layers_per_block + 1) :] __lowerCAmelCase : str = down_block_res_samples[: -(self.layers_per_block + 1)] if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = up_block( _SCREAMING_SNAKE_CASE , temb=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , res_hidden_states_tuple=_SCREAMING_SNAKE_CASE , deterministic=not train , ) else: __lowerCAmelCase : Dict = up_block(_SCREAMING_SNAKE_CASE , temb=_SCREAMING_SNAKE_CASE , res_hidden_states_tuple=_SCREAMING_SNAKE_CASE , deterministic=not train ) # 6. 
post-process __lowerCAmelCase : str = self.conv_norm_out(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = nn.silu(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self.conv_out(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = jnp.transpose(_SCREAMING_SNAKE_CASE , (0, 3, 1, 2) ) if not return_dict: return (sample,) return FlaxUNetaDConditionOutput(sample=_SCREAMING_SNAKE_CASE )
86
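The down/up channel bookkeeping in the UNet `setup` above is easy to lose track of: the down path walks `block_out_channels` forward, while the up path walks it in reverse and threads `prev_output_channel` through. The pure-Python sketch below reproduces just that arithmetic for the default channel tuple; no Flax is needed.

```python
# Mirrors only the channel arithmetic of the setup above, not the real blocks.
block_out_channels = (320, 640, 1280, 1280)

down = []
output_channel = block_out_channels[0]
for i, channels in enumerate(block_out_channels):
    input_channel, output_channel = output_channel, channels
    is_final_block = i == len(block_out_channels) - 1
    down.append((input_channel, output_channel, not is_final_block))  # (in, out, add_downsample)

up = []
reversed_channels = list(reversed(block_out_channels))
output_channel = reversed_channels[0]
for i, channels in enumerate(reversed_channels):
    prev_output_channel, output_channel = output_channel, channels
    input_channel = reversed_channels[min(i + 1, len(reversed_channels) - 1)]
    is_final_block = i == len(reversed_channels) - 1
    up.append((input_channel, output_channel, prev_output_channel, not is_final_block))

print(down)  # [(320, 320, True), (320, 640, True), (640, 1280, True), (1280, 1280, False)]
print(up)    # [(1280, 1280, 1280, True), (640, 1280, 1280, True), (320, 640, 1280, True), (320, 320, 640, False)]
```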
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class A__ ( unittest.TestCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33} __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : int = batch_size __lowerCAmelCase : str = num_channels __lowerCAmelCase : Optional[int] = min_resolution __lowerCAmelCase : List[Any] = max_resolution __lowerCAmelCase : Union[str, Any] = do_resize __lowerCAmelCase : Optional[Any] = size __lowerCAmelCase : Dict = do_rescale __lowerCAmelCase : Optional[Any] = rescale_factor __lowerCAmelCase : Any = do_normalize __lowerCAmelCase : List[str] = image_mean __lowerCAmelCase : Union[str, Any] = image_std __lowerCAmelCase : Optional[int] = do_pad def __lowerCamelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): if not batched: __lowerCAmelCase : str = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): __lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size else: __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2] if w < h: __lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w ) __lowerCAmelCase : Optional[int] = self.size['shortest_edge'] elif w > h: __lowerCAmelCase : str = self.size['shortest_edge'] __lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h ) else: __lowerCAmelCase : str = self.size['shortest_edge'] __lowerCAmelCase : Optional[Any] = self.size['shortest_edge'] else: __lowerCAmelCase : str = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] __lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__ ( _lowerCamelCase , unittest.TestCase): A_ : List[str] = DetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = DetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): # Initialize image_processing __lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input __lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): # Initialize image_processing __lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input __lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): # Initialize image_processing __lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input __lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): # prepare image and target __lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: __lowerCAmelCase : Any = json.loads(f.read() ) __lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target} # encode them __lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' ) __lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values __lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes __lowerCAmelCase : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __lowerCAmelCase : Dict = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd __lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels __lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size __lowerCAmelCase : int = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size __lowerCAmelCase : List[Any] = torch.tensor([8_00, 
10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def __lowerCamelCase ( self ): # prepare image, target and masks_path __lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: __lowerCAmelCase : Optional[int] = json.loads(f.read() ) __lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target} __lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them __lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' ) __lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values __lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes __lowerCAmelCase : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __lowerCAmelCase : str = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd __lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels __lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks __lowerCAmelCase : Dict = 82_28_73 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size __lowerCAmelCase : str = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size __lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
86
1
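`get_expected_values` above mirrors DETR's shortest-edge resize; the small helper below states the rule in isolation (the function name and the toy sizes are illustrative, not part of the processor API): scale the image so its shorter side equals `shortest_edge`, preserving the aspect ratio.

```python
def expected_size(width: int, height: int, shortest_edge: int = 18) -> tuple[int, int]:
    """Return the (height, width) an image resizes to under the shortest-edge rule."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


assert expected_size(400, 300) == (18, 24)   # landscape: height pinned to 18
assert expected_size(300, 400) == (24, 18)   # portrait: width pinned to 18
assert expected_size(256, 256) == (18, 18)   # square: both sides pinned
```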
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch from ..models.clipseg import CLIPSegForImageSegmentation from ..utils import is_vision_available, requires_backends from .base import PipelineTool if is_vision_available(): from PIL import Image class A__ ( _lowerCamelCase): A_ : int = ( 'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.' 'It takes two arguments named `image` which should be the original image, and `label` which should be a text ' 'describing the elements what should be identified in the segmentation mask. The tool returns the mask.' ) A_ : Optional[int] = 'CIDAS/clipseg-rd64-refined' A_ : Any = 'image_segmenter' A_ : Union[str, Any] = CLIPSegForImageSegmentation A_ : Any = ['image', 'text'] A_ : List[Any] = ['image'] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): requires_backends(self , ['vision'] ) super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): return self.pre_processor(text=[label] , images=[image] , padding=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): with torch.no_grad(): __lowerCAmelCase : Union[str, Any] = self.model(**_SCREAMING_SNAKE_CASE ).logits return logits def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = outputs.cpu().detach().numpy() __lowerCAmelCase : int = 0 __lowerCAmelCase : Optional[int] = 1 return Image.fromarray((array * 2_55).astype(np.uinta ) )
86
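As a toy illustration of the mask post-processing in the tool's `decode` above (random logits stand in for real CLIPSeg output), logits are thresholded at zero into a binary mask and rendered as an 8-bit image:

```python
import numpy as np
from PIL import Image

logits = np.random.randn(64, 64).astype(np.float32)  # stand-in for model logits

array = logits.copy()
array[array <= 0] = 0   # background
array[array > 0] = 1    # pixels matching the label
mask = Image.fromarray((array * 255).astype(np.uint8))

assert mask.size == (64, 64)
assert set(np.asarray(mask).flatten().tolist()) <= {0, 255}
```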
"""simple docstring""" import numpy as np def __lowerCAmelCase (_UpperCamelCase ): return 1 / (1 + np.exp(-vector )) def __lowerCAmelCase (_UpperCamelCase ): return vector * sigmoid(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
86
1
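A quick sanity check for the activation pair above (it assumes the corrected names `sigmoid` / `sigmoid_linear_unit`): SiLU is algebraically `x * sigmoid(x)`, so the composed implementation must agree elementwise with the direct formula.

```python
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


x = np.linspace(-5.0, 5.0, 11)
assert np.allclose(x * sigmoid(x), x / (1 + np.exp(-x)))  # same function, two spellings
assert np.isclose(sigmoid(np.array(0.0)), 0.5)            # symmetry point of the sigmoid
```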
"""simple docstring""" import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse("""3.8"""): import importlib_metadata else: import importlib.metadata as importlib_metadata def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ): try: __lowerCAmelCase : str = os.environ[key] except KeyError: # KEY isn't set, default to `default`. __lowerCAmelCase : str = default else: # KEY is set, convert it to True or False. try: __lowerCAmelCase : Optional[int] = strtobool(_UpperCamelCase ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F"If set, {key} must be yes or no." ) return _value lowerCamelCase__ = parse_flag_from_env("""RUN_SLOW""", default=False) lowerCamelCase__ = parse_flag_from_env("""RUN_REMOTE""", default=False) lowerCamelCase__ = parse_flag_from_env("""RUN_LOCAL""", default=True) lowerCamelCase__ = parse_flag_from_env("""RUN_PACKAGED""", default=True) # Compression lowerCamelCase__ = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="""test requires lz4""") lowerCamelCase__ = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="""test requires py7zr""") lowerCamelCase__ = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="""test requires zstandard""") # Audio lowerCamelCase__ = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec("""soundfile""") is None or version.parse(importlib_metadata.version("""soundfile""")) < version.parse("""0.12.0"""), reason="""test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; """, ) # Beam lowerCamelCase__ = pytest.mark.skipif( not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("""0.3.2"""), reason="""test requires apache-beam and a compatible dill version""", ) # Dill-cloudpickle compatibility lowerCamelCase__ = pytest.mark.skipif( config.DILL_VERSION <= version.parse("""0.3.2"""), reason="""test requires dill>0.3.2 for cloudpickle compatibility""", ) # Windows lowerCamelCase__ = pytest.mark.skipif( sys.platform == """win32""", reason="""test should not be run on Windows""", ) def __lowerCAmelCase (_UpperCamelCase ): try: import faiss # noqa except ImportError: __lowerCAmelCase : Dict = unittest.skip('test requires faiss' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import regex # noqa except ImportError: __lowerCAmelCase : Dict = unittest.skip('test requires regex' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import elasticsearch # noqa except ImportError: __lowerCAmelCase : Optional[int] = unittest.skip('test requires elasticsearch' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import sqlalchemy # noqa except ImportError: __lowerCAmelCase : Union[str, Any] = unittest.skip('test requires sqlalchemy' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not config.TORCH_AVAILABLE: __lowerCAmelCase : List[str] = unittest.skip('test requires PyTorch' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not config.TF_AVAILABLE: __lowerCAmelCase : List[Any] = unittest.skip('test requires 
TensorFlow' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not config.JAX_AVAILABLE: __lowerCAmelCase : Tuple = unittest.skip('test requires JAX' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not config.PIL_AVAILABLE: __lowerCAmelCase : Tuple = unittest.skip('test requires Pillow' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import transformers # noqa F401 except ImportError: return unittest.skip('test requires transformers' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import tiktoken # noqa F401 except ImportError: return unittest.skip('test requires tiktoken' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import spacy # noqa F401 except ImportError: return unittest.skip('test requires spacy' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): def _require_spacy_model(_UpperCamelCase ): try: import spacy # noqa F401 spacy.load(_UpperCamelCase ) except ImportError: return unittest.skip('test requires spacy' )(_UpperCamelCase ) except OSError: return unittest.skip('test requires spacy model \'{}\''.format(_UpperCamelCase ) )(_UpperCamelCase ) else: return test_case return _require_spacy_model def __lowerCAmelCase (_UpperCamelCase ): try: import pyspark # noqa F401 except ImportError: return unittest.skip('test requires pyspark' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): try: import joblibspark # noqa F401 except ImportError: return unittest.skip('test requires joblibspark' )(_UpperCamelCase ) else: return test_case def __lowerCAmelCase (_UpperCamelCase ): if not _run_slow_tests or _run_slow_tests == 0: __lowerCAmelCase : Union[str, Any] = unittest.skip('test is slow' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not _run_local_tests or _run_local_tests == 0: __lowerCAmelCase : str = unittest.skip('test is local' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not _run_packaged_tests or _run_packaged_tests == 0: __lowerCAmelCase : Union[str, Any] = unittest.skip('test is packaged' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (_UpperCamelCase ): if not _run_remote_tests or _run_remote_tests == 0: __lowerCAmelCase : int = unittest.skip('test requires remote' )(_UpperCamelCase ) return test_case def __lowerCAmelCase (*_UpperCamelCase ): def decorate(cls ): for name, fn in cls.__dict__.items(): if callable(_UpperCamelCase ) and name.startswith('test' ): for decorator in decorators: __lowerCAmelCase : Any = decorator(_UpperCamelCase ) setattr(cls , _UpperCamelCase , _UpperCamelCase ) return cls return decorate class A__ ( _lowerCamelCase): pass class A__ ( _lowerCamelCase): A_ : Tuple = 0 A_ : List[Any] = 1 A_ : Optional[int] = 2 @contextmanager def __lowerCAmelCase (_UpperCamelCase=OfflineSimulationMode.CONNECTION_FAILS , _UpperCamelCase=1e-1_6 ): __lowerCAmelCase : List[str] = requests.Session().request def timeout_request(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): # Change the url to an invalid url so that the connection hangs __lowerCAmelCase : Optional[int] = 'https://10.255.255.1' if kwargs.get('timeout' ) is None: raise RequestWouldHangIndefinitelyError( F"Tried a call to {url} in offline mode with no timeout set. Please set a timeout." 
) __lowerCAmelCase : List[Any] = timeout try: return online_request(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier __lowerCAmelCase : Tuple = url __lowerCAmelCase : Optional[int] = e.args[0] __lowerCAmelCase : Any = (max_retry_error.args[0].replace('10.255.255.1' , F"OfflineMock[{url}]" ),) __lowerCAmelCase : List[str] = (max_retry_error,) raise def raise_connection_error(_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): raise requests.ConnectionError('Offline mode is enabled.' , request=_UpperCamelCase ) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch('requests.Session.send' , _UpperCamelCase ): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch('requests.Session.request' , _UpperCamelCase ): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch('datasets.config.HF_DATASETS_OFFLINE' , _UpperCamelCase ): yield else: raise ValueError('Please use a value from the OfflineSimulationMode enum.' ) @contextmanager def __lowerCAmelCase (*_UpperCamelCase , **_UpperCamelCase ): __lowerCAmelCase : Tuple = str(Path().resolve() ) with tempfile.TemporaryDirectory(*_UpperCamelCase , **_UpperCamelCase ) as tmp_dir: try: os.chdir(_UpperCamelCase ) yield finally: os.chdir(_UpperCamelCase ) @contextmanager def __lowerCAmelCase (): import gc gc.collect() __lowerCAmelCase : Optional[Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def __lowerCAmelCase (): import gc gc.collect() __lowerCAmelCase : List[Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): return deepcopy(_UpperCamelCase ).integers(0 , 100 , 10 ).tolist() == deepcopy(_UpperCamelCase ).integers(0 , 100 , 10 ).tolist() def __lowerCAmelCase (_UpperCamelCase ): import decorator from requests.exceptions import HTTPError def _wrapper(_UpperCamelCase , *_UpperCamelCase , **_UpperCamelCase ): try: return func(*_UpperCamelCase , **_UpperCamelCase ) except HTTPError as err: if str(_UpperCamelCase ).startswith('500' ) or str(_UpperCamelCase ).startswith('502' ): pytest.xfail(str(_UpperCamelCase ) ) raise err return decorator.decorator(_wrapper , _UpperCamelCase ) class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = returncode __lowerCAmelCase : Optional[Any] = stdout __lowerCAmelCase : Optional[int] = stderr async def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): while True: __lowerCAmelCase : Optional[Any] = await stream.readline() if line: callback(_UpperCamelCase ) else: break async def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=False ): if echo: print('\nRunning: ' , ' '.join(_UpperCamelCase ) ) __lowerCAmelCase : Any = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_UpperCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_UpperCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) __lowerCAmelCase : int = [] __lowerCAmelCase : Tuple = [] def tee(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase="" ): __lowerCAmelCase : Tuple = line.decode('utf-8' ).rstrip() sink.append(_UpperCamelCase ) if not quiet: print(_UpperCamelCase , _UpperCamelCase , file=_UpperCamelCase ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stdout , label='stdout:' ) ), _read_stream(p.stderr , lambda _UpperCamelCase : tee(_UpperCamelCase , _UpperCamelCase , sys.stderr , label='stderr:' ) ), ] , timeout=_UpperCamelCase , ) return _RunOutput(await p.wait() , _UpperCamelCase , _UpperCamelCase ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=180 , _UpperCamelCase=False , _UpperCamelCase=True ): __lowerCAmelCase : List[Any] = asyncio.get_event_loop() __lowerCAmelCase : Union[str, Any] = loop.run_until_complete( _stream_subprocess(_UpperCamelCase , env=_UpperCamelCase , stdin=_UpperCamelCase , timeout=_UpperCamelCase , quiet=_UpperCamelCase , echo=_UpperCamelCase ) ) __lowerCAmelCase : List[Any] = ' '.join(_UpperCamelCase ) if result.returncode > 0: __lowerCAmelCase : Union[str, Any] = '\n'.join(result.stderr ) raise RuntimeError( F"'{cmd_str}' failed with returncode {result.returncode}\n\n" F"The combined stderr from workers follows:\n{stderr}" ) # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(F"'{cmd_str}' produced no output." ) return result def __lowerCAmelCase (): __lowerCAmelCase : str = os.environ.get('PYTEST_XDIST_WORKER' , 'gw0' ) __lowerCAmelCase : Any = re.sub(r'^gw' , '' , _UpperCamelCase , 0 , re.M ) return int(_UpperCamelCase ) def __lowerCAmelCase (): __lowerCAmelCase : int = 2_9500 __lowerCAmelCase : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
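The skip helpers above all follow one pattern: read a RUN_* flag from the environment once at import time, then gate individual tests with unittest.skip decorators. A minimal self-contained sketch of that pattern follows; the names RUN_SLOW and slow are illustrative choices for this example, not taken from the sample above.

import os
import unittest

# Sketch of the env-flag + skip-decorator pattern; `RUN_SLOW` and `slow`
# are illustrative names chosen for this example.
_run_slow_tests = os.environ.get("RUN_SLOW", "no").lower() in ("1", "yes", "true")


def slow(test_case):
    # Skip the decorated test unless RUN_SLOW is set to a truthy value.
    if not _run_slow_tests:
        return unittest.skip("test is slow")(test_case)
    return test_case


class ExampleTest(unittest.TestCase):
    @slow
    def test_expensive_path(self):
        self.assertEqual(sum(range(10)), 45)


if __name__ == "__main__":
    unittest.main()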
86
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Union[str, Any] = batch_size __lowerCAmelCase : Dict = seq_length __lowerCAmelCase : Dict = is_training __lowerCAmelCase : List[str] = use_input_mask __lowerCAmelCase : int = use_token_type_ids __lowerCAmelCase : Optional[int] = use_labels __lowerCAmelCase : List[Any] = vocab_size __lowerCAmelCase : Dict = hidden_size __lowerCAmelCase : Tuple = embedding_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : Tuple = num_attention_heads __lowerCAmelCase : Union[str, Any] = intermediate_size __lowerCAmelCase : Optional[Any] = hidden_act __lowerCAmelCase : Optional[int] = hidden_dropout_prob __lowerCAmelCase : Dict = attention_probs_dropout_prob __lowerCAmelCase : Any = max_position_embeddings __lowerCAmelCase : Any = type_vocab_size __lowerCAmelCase : Union[str, Any] = type_sequence_label_size __lowerCAmelCase : List[str] = initializer_range __lowerCAmelCase : str = num_labels __lowerCAmelCase : int = num_choices __lowerCAmelCase : Union[str, Any] = scope def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Optional[int] = None if self.use_input_mask: __lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : str = None if self.use_token_type_ids: __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Union[str, Any] = None if self.use_labels: __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): return 
MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Dict = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[Any] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = 
MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[str] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = self.num_labels __lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = self.num_labels __lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = self.num_choices __lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[str] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) : Optional[Any] = config_and_inputs __lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : str = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() 
else () ) A_ : List[str] = ( { 'feature-extraction': MobileBertModel, 'fill-mask': MobileBertForMaskedLM, 'question-answering': MobileBertForQuestionAnswering, 'text-classification': MobileBertForSequenceClassification, 'token-classification': MobileBertForTokenClassification, 'zero-shot': MobileBertForSequenceClassification, } if is_torch_available() else {} ) A_ : Dict = True def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) return inputs_dict def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = MobileBertModelTester(self ) __lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE ) def __lowerCAmelCase (_UpperCamelCase ): return torch.tensor( _UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , ) lowerCamelCase__ = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0] __lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) 
__lowerCAmelCase : Dict = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=_SCREAMING_SNAKE_CASE , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE __lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) __lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
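The bounds check at the end of the integration test encodes a useful trick: because the activations span several orders of magnitude, closeness is measured by the ratio expected/actual rather than by an absolute difference. A standalone sketch of the same check; the function and variable names here are illustrative, not part of the test above.

import torch

TOLERANCE = 1e-3


def ratio_close(expected: torch.Tensor, actual: torch.Tensor, tol: float = TOLERANCE) -> bool:
    # Compare by ratio so one fixed tolerance works for values near 1e0 and near 1e8 alike.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))


print(ratio_close(torch.tensor([1.0e8, 2.0]), torch.tensor([1.00001e8, 2.0000004])))  # True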
86
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class A__ ( metaclass=_lowerCamelCase): A_ : Optional[int] = ['keras_nlp'] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): requires_backends(self , ['keras_nlp'] )
86
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class A__ ( _lowerCamelCase): A_ : Any = ['image_processor', 'tokenizer'] A_ : Optional[Any] = 'AutoImageProcessor' A_ : str = 'AutoTokenizer' def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Any = kwargs.pop('feature_extractor' ) __lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = self.image_processor __lowerCAmelCase : Tuple = False def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: __lowerCAmelCase : Dict = args[0] __lowerCAmelCase : Union[str, Any] = args[1:] if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: __lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is not None: __lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is None: return inputs elif images is None: return encodings else: __lowerCAmelCase : Union[str, Any] = encodings['input_ids'] return inputs def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @contextmanager def __lowerCamelCase ( self ): warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your images inputs, or in a separate call.' 
) __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = self.tokenizer yield __lowerCAmelCase : Optional[int] = self.image_processor __lowerCAmelCase : Optional[Any] = False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ): if added_vocab is None: __lowerCAmelCase : str = self.tokenizer.get_added_vocab() __lowerCAmelCase : List[Any] = {} while tokens: __lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) if start_token is None: break __lowerCAmelCase : Union[str, Any] = start_token.group(1 ) __lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) __lowerCAmelCase : str = start_token.group() if end_token is None: __lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' ) else: __lowerCAmelCase : Optional[Any] = end_token.group() __lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) if content is not None: __lowerCAmelCase : List[str] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node __lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE ) if value: if len(_SCREAMING_SNAKE_CASE ) == 1: __lowerCAmelCase : Tuple = value[0] __lowerCAmelCase : Tuple = value else: # leaf nodes __lowerCAmelCase : Any = [] for leaf in content.split(R'<sep/>' ): __lowerCAmelCase : List[Any] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": __lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens output[key].append(_SCREAMING_SNAKE_CASE ) if len(output[key] ) == 1: __lowerCAmelCase : str = output[key][0] __lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def __lowerCamelCase ( self ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def __lowerCamelCase ( self ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , ) return self.image_processor
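The tokenajson method above is a small recursive parser: each <s_key>...</s_key> span becomes a dictionary entry and <sep/> separates list items. A toy flat version of the same grammar, independent of the class above (no nesting or <sep/> handling, and the function name is mine):

import re


def parse_tagged(tokens: str) -> dict:
    # Toy parser for <s_key>value</s_key> markup, illustrating the shape of the
    # output that the recursive method above produces for flat inputs.
    output = {}
    for match in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", tokens, re.IGNORECASE | re.DOTALL):
        key, value = match.group(1), match.group(2).strip()
        output[key] = value
    return output


print(parse_tagged("<s_menu>latte</s_menu><s_price>4.50</s_price>"))
# {'menu': 'latte', 'price': '4.50'}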
86
1
"""simple docstring""" import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class A__ ( _lowerCamelCase): # to overwrite at feature extractactor specific tests A_ : Tuple = None A_ : Any = None @property def __lowerCamelCase ( self ): return self.feat_extract_tester.prepare_feat_extract_dict() def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'feature_size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'sampling_rate' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'padding_value' ) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common() __lowerCAmelCase : int = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0] __lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE ) for x, y in zip(_SCREAMING_SNAKE_CASE , processed_features[input_name] ) ) ) __lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) __lowerCAmelCase : List[Any] = processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowerCAmelCase : Dict = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCAmelCase : Dict = feat_extract.model_input_names[0] __lowerCAmelCase : Optional[int] = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) __lowerCAmelCase : Optional[int] = processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowerCAmelCase : int = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCAmelCase : List[str] = feat_extract.model_input_names[0] __lowerCAmelCase : Any = BatchFeature({input_name: speech_inputs} , tensor_type='tf' ) __lowerCAmelCase : Tuple = processed_features[input_name] if len(batch_features_input.shape ) < 3: __lowerCAmelCase : Optional[int] = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=False ): def _inputs_have_equal_length(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = len(input[0] ) for input_slice in input[1:]: if len(_SCREAMING_SNAKE_CASE ) != length: return False return True def _inputs_are_equal(_SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE ): if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): return False for input_slice_a, input_slice_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if not np.allclose(np.asarray(_SCREAMING_SNAKE_CASE ) , np.asarray(_SCREAMING_SNAKE_CASE ) , atol=1E-3 ): return False return True __lowerCAmelCase : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCAmelCase : Any = self.feat_extract_tester.prepare_inputs_for_common(numpify=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = feat_extract.model_input_names[0] __lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} ) __lowerCAmelCase : Optional[Any] = self.feat_extract_tester.seq_length_diff __lowerCAmelCase : Optional[int] = self.feat_extract_tester.max_seq_length + pad_diff __lowerCAmelCase : int = self.feat_extract_tester.min_seq_length __lowerCAmelCase : List[str] = self.feat_extract_tester.batch_size __lowerCAmelCase : Union[str, Any] = self.feat_extract_tester.feature_size # test padding for List[int] + numpy __lowerCAmelCase : Dict = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = input_a[input_name] __lowerCAmelCase : Dict = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' ) __lowerCAmelCase : Union[str, Any] = input_a[input_name] __lowerCAmelCase : Optional[Any] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[-1] ) ) __lowerCAmelCase : List[Any] = input_a[input_name] __lowerCAmelCase : Union[str, Any] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' ) __lowerCAmelCase : Any = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(_SCREAMING_SNAKE_CASE ): feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='max_length' )[input_name] __lowerCAmelCase : List[Any] = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=_SCREAMING_SNAKE_CASE , return_tensors='np' ) __lowerCAmelCase : Dict = input_a[input_name] self.assertFalse(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_are_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy __lowerCAmelCase : Optional[int] = feat_extract.pad(_SCREAMING_SNAKE_CASE , pad_to_multiple_of=10 ) __lowerCAmelCase : Tuple = input_a[input_name] __lowerCAmelCase : Tuple = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , pad_to_multiple_of=10 ) __lowerCAmelCase : Union[str, Any] = input_a[input_name] __lowerCAmelCase : int = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , pad_to_multiple_of=10 , max_length=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = input_a[input_name] __lowerCAmelCase : int = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , pad_to_multiple_of=10 , max_length=_SCREAMING_SNAKE_CASE , return_tensors='np' , ) __lowerCAmelCase : int = input_a[input_name] 
self.assertTrue(all(len(_SCREAMING_SNAKE_CASE ) % 10 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : List[str] = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10 self.assertTrue(all(len(_SCREAMING_SNAKE_CASE ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct __lowerCAmelCase : List[str] = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1E-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1E-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1E-3 ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=False ): def _inputs_have_equal_length(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = len(input[0] ) for input_slice in input[1:]: if len(_SCREAMING_SNAKE_CASE ) != length: return False return True def _inputs_are_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if len(_SCREAMING_SNAKE_CASE ) != len(_SCREAMING_SNAKE_CASE ): return False for input_slice_a, input_slice_a in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if not np.allclose(np.asarray(_SCREAMING_SNAKE_CASE ) , np.asarray(_SCREAMING_SNAKE_CASE ) , atol=1E-3 ): return False return True __lowerCAmelCase : Dict = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCAmelCase : Optional[int] = self.feat_extract_tester.prepare_inputs_for_common(numpify=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = feat_extract.model_input_names[0] __lowerCAmelCase : str = BatchFeature({input_name: speech_inputs} ) # truncate to smallest __lowerCAmelCase : List[Any] = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = input_a[input_name] __lowerCAmelCase : Dict = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[0] ) ) __lowerCAmelCase : int = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertFalse(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) # truncate to smallest with np __lowerCAmelCase : Optional[Any] = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : int = input_a[input_name] __lowerCAmelCase : List[str] = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' ) __lowerCAmelCase : str = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be 
smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) # truncate to middle __lowerCAmelCase : Dict = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' , ) __lowerCAmelCase : Tuple = input_a[input_name] __lowerCAmelCase : Optional[Any] = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = input_a[input_name] __lowerCAmelCase : Optional[int] = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' ) __lowerCAmelCase : Optional[Any] = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(_inputs_are_equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(_SCREAMING_SNAKE_CASE ): feat_extract.pad(_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_SCREAMING_SNAKE_CASE ): feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , truncation=_SCREAMING_SNAKE_CASE )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_SCREAMING_SNAKE_CASE ): feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , truncation=_SCREAMING_SNAKE_CASE )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(_SCREAMING_SNAKE_CASE ): feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy __lowerCAmelCase : str = 12 __lowerCAmelCase : Tuple = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : int = input_a[input_name] __lowerCAmelCase : Optional[int] = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of __lowerCAmelCase : Union[str, Any] = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: __lowerCAmelCase : Optional[int] = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) self.assertFalse(_inputs_have_equal_length(_SCREAMING_SNAKE_CASE ) ) def __lowerCamelCase ( self ): self._check_padding(numpify=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): self._check_padding(numpify=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): 
self._check_truncation(numpify=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): self._check_truncation(numpify=_SCREAMING_SNAKE_CASE ) @require_torch def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCAmelCase : int = self.feat_extract_tester.prepare_inputs_for_common() __lowerCAmelCase : List[str] = feat_extract.model_input_names[0] __lowerCAmelCase : Tuple = BatchFeature({input_name: speech_inputs} ) __lowerCAmelCase : Any = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' )[input_name] __lowerCAmelCase : Union[str, Any] = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1E-2 ) @require_tf def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.feature_extraction_class(**self.feat_extract_dict ) __lowerCAmelCase : Dict = self.feat_extract_tester.prepare_inputs_for_common() __lowerCAmelCase : Any = feat_extract.model_input_names[0] __lowerCAmelCase : str = BatchFeature({input_name: speech_inputs} ) __lowerCAmelCase : str = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' )[input_name] __lowerCAmelCase : Dict = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='tf' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1E-2 ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.feat_extract_dict __lowerCAmelCase : int = True __lowerCAmelCase : Tuple = self.feature_extraction_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = self.feat_extract_tester.prepare_inputs_for_common() __lowerCAmelCase : Optional[int] = [len(_SCREAMING_SNAKE_CASE ) for x in speech_inputs] __lowerCAmelCase : List[Any] = feat_extract.model_input_names[0] __lowerCAmelCase : Union[str, Any] = BatchFeature({input_name: speech_inputs} ) __lowerCAmelCase : Tuple = feat_extract.pad(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , _SCREAMING_SNAKE_CASE ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.feat_extract_dict __lowerCAmelCase : str = True __lowerCAmelCase : Optional[Any] = self.feature_extraction_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self.feat_extract_tester.prepare_inputs_for_common() __lowerCAmelCase : Optional[int] = [len(_SCREAMING_SNAKE_CASE ) for x in speech_inputs] __lowerCAmelCase : Tuple = feat_extract.model_input_names[0] __lowerCAmelCase : List[Any] = BatchFeature({input_name: speech_inputs} ) __lowerCAmelCase : str = min(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = feat_extract.pad( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='np' ) self.assertIn('attention_mask' , _SCREAMING_SNAKE_CASE ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
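The padding assertions above all revolve around one operation: right-padding variable-length sequences to a common length with a fill value, while tracking an attention mask. A minimal NumPy sketch of that operation; the function and variable names are illustrative.

import numpy as np


def pad_to_longest(sequences, padding_value=0.0):
    # Right-pad each 1-D sequence with `padding_value` to the longest length,
    # mirroring what `feat_extract.pad(..., padding='longest')` is checked for above.
    max_len = max(len(seq) for seq in sequences)
    batch = np.full((len(sequences), max_len), padding_value, dtype=np.float32)
    attention_mask = np.zeros((len(sequences), max_len), dtype=np.int32)
    for i, seq in enumerate(sequences):
        batch[i, : len(seq)] = seq
        attention_mask[i, : len(seq)] = 1
    return batch, attention_mask


batch, mask = pad_to_longest([[0.1, 0.2], [0.3, 0.4, 0.5]])
print(batch.shape, mask.sum(-1).tolist())  # (2, 3) [2, 3]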
86
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Tuple = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def __lowerCAmelCase (_UpperCamelCase = 100 ): __lowerCAmelCase : Optional[int] = 1 __lowerCAmelCase : Optional[Any] = 2 for i in range(2 , max_n + 1 ): __lowerCAmelCase : Any = pre_numerator __lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1 __lowerCAmelCase : int = cur_numerator __lowerCAmelCase : Dict = e_cont * pre_numerator + temp return sum_digits(_UpperCamelCase ) if __name__ == "__main__": print(f'{solution() = }')
86
1
"""simple docstring""" import argparse import json import torch from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=1 ): if n_shave_prefix_segments >= 0: return ".".join(path.split('.' )[n_shave_prefix_segments:] ) else: return ".".join(path.split('.' )[:n_shave_prefix_segments] ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=0 ): __lowerCAmelCase : Optional[int] = [] for old_item in old_list: __lowerCAmelCase : Optional[Any] = old_item.replace('in_layers.0' , 'norm1' ) __lowerCAmelCase : Optional[int] = new_item.replace('in_layers.2' , 'conv1' ) __lowerCAmelCase : Dict = new_item.replace('out_layers.0' , 'norm2' ) __lowerCAmelCase : Any = new_item.replace('out_layers.3' , 'conv2' ) __lowerCAmelCase : List[Any] = new_item.replace('emb_layers.1' , 'time_emb_proj' ) __lowerCAmelCase : List[Any] = new_item.replace('skip_connection' , 'conv_shortcut' ) __lowerCAmelCase : Tuple = shave_segments(_UpperCamelCase , n_shave_prefix_segments=_UpperCamelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=0 ): __lowerCAmelCase : List[Any] = [] for old_item in old_list: __lowerCAmelCase : Tuple = old_item __lowerCAmelCase : Tuple = new_item.replace('norm.weight' , 'group_norm.weight' ) __lowerCAmelCase : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' ) __lowerCAmelCase : str = new_item.replace('proj_out.weight' , 'proj_attn.weight' ) __lowerCAmelCase : int = new_item.replace('proj_out.bias' , 'proj_attn.bias' ) __lowerCAmelCase : Optional[Any] = shave_segments(_UpperCamelCase , n_shave_prefix_segments=_UpperCamelCase ) mapping.append({'old': old_item, 'new': new_item} ) return mapping def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=None ): assert isinstance(_UpperCamelCase , _UpperCamelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys." # Splits the attention layers into three variables. 
if attention_paths_to_split is not None: for path, path_map in attention_paths_to_split.items(): __lowerCAmelCase : Any = old_checkpoint[path] __lowerCAmelCase : Tuple = old_tensor.shape[0] // 3 __lowerCAmelCase : List[Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1) __lowerCAmelCase : Union[str, Any] = old_tensor.shape[0] // config['num_head_channels'] // 3 __lowerCAmelCase : Optional[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = old_tensor.split(channels // num_heads , dim=1 ) __lowerCAmelCase : Any = query.reshape(_UpperCamelCase ) __lowerCAmelCase : int = key.reshape(_UpperCamelCase ) __lowerCAmelCase : Optional[Any] = value.reshape(_UpperCamelCase ) for path in paths: __lowerCAmelCase : List[Any] = path['new'] # These have already been assigned if attention_paths_to_split is not None and new_path in attention_paths_to_split: continue # Global renaming happens here __lowerCAmelCase : List[Any] = new_path.replace('middle_block.0' , 'mid_block.resnets.0' ) __lowerCAmelCase : Any = new_path.replace('middle_block.1' , 'mid_block.attentions.0' ) __lowerCAmelCase : List[str] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' ) if additional_replacements is not None: for replacement in additional_replacements: __lowerCAmelCase : Optional[Any] = new_path.replace(replacement['old'] , replacement['new'] ) # proj_attn.weight has to be converted from conv 1D to linear if "proj_attn.weight" in new_path: __lowerCAmelCase : Union[str, Any] = old_checkpoint[path['old']][:, :, 0] else: __lowerCAmelCase : Tuple = old_checkpoint[path['old']] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : str = {} __lowerCAmelCase : Any = checkpoint['time_embed.0.weight'] __lowerCAmelCase : Tuple = checkpoint['time_embed.0.bias'] __lowerCAmelCase : Optional[Any] = checkpoint['time_embed.2.weight'] __lowerCAmelCase : List[str] = checkpoint['time_embed.2.bias'] __lowerCAmelCase : str = checkpoint['input_blocks.0.0.weight'] __lowerCAmelCase : int = checkpoint['input_blocks.0.0.bias'] __lowerCAmelCase : Any = checkpoint['out.0.weight'] __lowerCAmelCase : Any = checkpoint['out.0.bias'] __lowerCAmelCase : List[str] = checkpoint['out.2.weight'] __lowerCAmelCase : Tuple = checkpoint['out.2.bias'] # Retrieves the keys for the input blocks only __lowerCAmelCase : str = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} ) __lowerCAmelCase : List[Any] = { layer_id: [key for key in checkpoint if F"input_blocks.{layer_id}" in key] for layer_id in range(_UpperCamelCase ) } # Retrieves the keys for the middle blocks only __lowerCAmelCase : Tuple = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} ) __lowerCAmelCase : List[Any] = { layer_id: [key for key in checkpoint if F"middle_block.{layer_id}" in key] for layer_id in range(_UpperCamelCase ) } # Retrieves the keys for the output blocks only __lowerCAmelCase : str = len({'.'.join(layer.split('.' 
)[:2] ) for layer in checkpoint if 'output_blocks' in layer} ) __lowerCAmelCase : Optional[int] = { layer_id: [key for key in checkpoint if F"output_blocks.{layer_id}" in key] for layer_id in range(_UpperCamelCase ) } for i in range(1 , _UpperCamelCase ): __lowerCAmelCase : List[Any] = (i - 1) // (config['num_res_blocks'] + 1) __lowerCAmelCase : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1) __lowerCAmelCase : Optional[Any] = [key for key in input_blocks[i] if F"input_blocks.{i}.0" in key] __lowerCAmelCase : List[str] = [key for key in input_blocks[i] if F"input_blocks.{i}.1" in key] if F"input_blocks.{i}.0.op.weight" in checkpoint: __lowerCAmelCase : List[Any] = checkpoint[ F"input_blocks.{i}.0.op.weight" ] __lowerCAmelCase : Optional[Any] = checkpoint[ F"input_blocks.{i}.0.op.bias" ] continue __lowerCAmelCase : Union[str, Any] = renew_resnet_paths(_UpperCamelCase ) __lowerCAmelCase : Dict = {'old': F"input_blocks.{i}.0", 'new': F"down_blocks.{block_id}.resnets.{layer_in_block_id}"} __lowerCAmelCase : Tuple = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'} assign_to_checkpoint( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path, resnet_op] , config=_UpperCamelCase ) if len(_UpperCamelCase ): __lowerCAmelCase : Tuple = renew_attention_paths(_UpperCamelCase ) __lowerCAmelCase : Any = { 'old': F"input_blocks.{i}.1", 'new': F"down_blocks.{block_id}.attentions.{layer_in_block_id}", } __lowerCAmelCase : Union[str, Any] = { F"input_blocks.{i}.1.qkv.bias": { 'key': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", 'query': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", 'value': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"input_blocks.{i}.1.qkv.weight": { 'key': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", 'query': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", 'value': F"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=_UpperCamelCase , config=_UpperCamelCase , ) __lowerCAmelCase : str = middle_blocks[0] __lowerCAmelCase : str = middle_blocks[1] __lowerCAmelCase : Optional[Any] = middle_blocks[2] __lowerCAmelCase : str = renew_resnet_paths(_UpperCamelCase ) assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , config=_UpperCamelCase ) __lowerCAmelCase : Tuple = renew_resnet_paths(_UpperCamelCase ) assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , config=_UpperCamelCase ) __lowerCAmelCase : str = renew_attention_paths(_UpperCamelCase ) __lowerCAmelCase : Any = { 'middle_block.1.qkv.bias': { 'key': 'mid_block.attentions.0.key.bias', 'query': 'mid_block.attentions.0.query.bias', 'value': 'mid_block.attentions.0.value.bias', }, 'middle_block.1.qkv.weight': { 'key': 'mid_block.attentions.0.key.weight', 'query': 'mid_block.attentions.0.query.weight', 'value': 'mid_block.attentions.0.value.weight', }, } assign_to_checkpoint( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , attention_paths_to_split=_UpperCamelCase , config=_UpperCamelCase ) for i in range(_UpperCamelCase ): __lowerCAmelCase : Tuple = i // (config['num_res_blocks'] + 1) __lowerCAmelCase : Optional[Any] = i % (config['num_res_blocks'] + 1) __lowerCAmelCase : Tuple = [shave_segments(_UpperCamelCase , 2 ) for name in output_blocks[i]] __lowerCAmelCase : str = {} 
for layer in output_block_layers: __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = layer.split('.' )[0], shave_segments(_UpperCamelCase , 1 ) if layer_id in output_block_list: output_block_list[layer_id].append(_UpperCamelCase ) else: __lowerCAmelCase : Tuple = [layer_name] if len(_UpperCamelCase ) > 1: __lowerCAmelCase : Optional[Any] = [key for key in output_blocks[i] if F"output_blocks.{i}.0" in key] __lowerCAmelCase : str = [key for key in output_blocks[i] if F"output_blocks.{i}.1" in key] __lowerCAmelCase : Dict = renew_resnet_paths(_UpperCamelCase ) __lowerCAmelCase : List[str] = renew_resnet_paths(_UpperCamelCase ) __lowerCAmelCase : Any = {'old': F"output_blocks.{i}.0", 'new': F"up_blocks.{block_id}.resnets.{layer_in_block_id}"} assign_to_checkpoint(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , config=_UpperCamelCase ) if ["conv.weight", "conv.bias"] in output_block_list.values(): __lowerCAmelCase : Dict = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] ) __lowerCAmelCase : str = checkpoint[ F"output_blocks.{i}.{index}.conv.weight" ] __lowerCAmelCase : List[str] = checkpoint[ F"output_blocks.{i}.{index}.conv.bias" ] # Clear attentions as they have been attributed above. if len(_UpperCamelCase ) == 2: __lowerCAmelCase : List[Any] = [] if len(_UpperCamelCase ): __lowerCAmelCase : str = renew_attention_paths(_UpperCamelCase ) __lowerCAmelCase : Dict = { 'old': F"output_blocks.{i}.1", 'new': F"up_blocks.{block_id}.attentions.{layer_in_block_id}", } __lowerCAmelCase : Any = { F"output_blocks.{i}.1.qkv.bias": { 'key': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias", 'query': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias", 'value': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias", }, F"output_blocks.{i}.1.qkv.weight": { 'key': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight", 'query': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight", 'value': F"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight", }, } assign_to_checkpoint( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=_UpperCamelCase , ) else: __lowerCAmelCase : List[Any] = renew_resnet_paths(_UpperCamelCase , n_shave_prefix_segments=1 ) for path in resnet_0_paths: __lowerCAmelCase : List[Any] = '.'.join(['output_blocks', str(_UpperCamelCase ), path['old']] ) __lowerCAmelCase : Dict = '.'.join(['up_blocks', str(_UpperCamelCase ), 'resnets', str(_UpperCamelCase ), path['new']] ) __lowerCAmelCase : Optional[Any] = checkpoint[old_path] return new_checkpoint if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """--checkpoint_path""", default=None, type=str, required=True, help="""Path to the checkpoint to convert.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the architecture.""", ) parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""") lowerCamelCase__ = parser.parse_args() lowerCamelCase__ = torch.load(args.checkpoint_path) with open(args.config_file) as f: lowerCamelCase__ = json.loads(f.read()) lowerCamelCase__ = convert_ldm_checkpoint(checkpoint, config) if "ldm" in config: del config["ldm"] lowerCamelCase__ = UNetaDModel(**config) 
model.load_state_dict(converted_checkpoint) try: lowerCamelCase__ = DDPMScheduler.from_config("""/""".join(args.checkpoint_path.split("""/""")[:-1])) lowerCamelCase__ = VQModel.from_pretrained("""/""".join(args.checkpoint_path.split("""/""")[:-1])) lowerCamelCase__ = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae) pipe.save_pretrained(args.dump_path) except: # noqa: E722 model.save_pretrained(args.dump_path)
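# Hedged post-conversion check: the try-branch above saves a full LDMPipeline,
# so the dump should reload with diffusers. The path is a placeholder for
# whatever --dump_path was given on the command line:
from diffusers import LDMPipeline

pipe = LDMPipeline.from_pretrained("./converted_ldm")  # hypothetical dump path
image = pipe(num_inference_steps=25).images[0]
image.save("ldm_sample.png")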
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""", """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""", } class A__ ( _lowerCamelCase): A_ : List[Any] = 'markuplm' def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): super().__init__( pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = vocab_size __lowerCAmelCase : Any = hidden_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : Tuple = num_attention_heads __lowerCAmelCase : Union[str, Any] = hidden_act __lowerCAmelCase : List[Any] = intermediate_size __lowerCAmelCase : List[str] = hidden_dropout_prob __lowerCAmelCase : List[str] = attention_probs_dropout_prob __lowerCAmelCase : Optional[int] = max_position_embeddings __lowerCAmelCase : int = type_vocab_size __lowerCAmelCase : Tuple = initializer_range __lowerCAmelCase : int = layer_norm_eps __lowerCAmelCase : List[str] = position_embedding_type __lowerCAmelCase : List[Any] = use_cache __lowerCAmelCase : Optional[Any] = classifier_dropout # additional properties __lowerCAmelCase : Optional[int] = max_depth __lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings __lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings __lowerCAmelCase : Any = tag_pad_id __lowerCAmelCase : Union[str, Any] = subs_pad_id __lowerCAmelCase : int = xpath_unit_hidden_size
"""simple docstring""" from abc import ABC, abstractmethod from typing import List, Optional class A__ ( _lowerCamelCase): def __init__( self ): # test for the above condition self.test() def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = 0 __lowerCAmelCase : List[Any] = False while not completed: if counter == 1: self.reset() __lowerCAmelCase : str = self.advance() if not self.does_advance(_SCREAMING_SNAKE_CASE ): raise Exception( 'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self.update(_SCREAMING_SNAKE_CASE ) counter += 1 if counter > 1_00_00: raise Exception('update() does not fulfill the constraint.' ) if self.remaining() != 0: raise Exception('Custom Constraint is not defined correctly.' ) @abstractmethod def __lowerCamelCase ( self ): raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def __lowerCamelCase ( self ): raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def __lowerCamelCase ( self ): raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) @abstractmethod def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=False ): raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE ): super(_SCREAMING_SNAKE_CASE , self ).__init__() if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}." ) if any((not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids ): raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}." 
) __lowerCAmelCase : List[Any] = token_ids __lowerCAmelCase : List[Any] = len(self.token_ids ) __lowerCAmelCase : Optional[Any] = -1 # the index of the currently fulfilled step __lowerCAmelCase : Optional[int] = False def __lowerCamelCase ( self ): if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(_SCREAMING_SNAKE_CASE )}" ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(_SCREAMING_SNAKE_CASE )}" ) __lowerCAmelCase : str = False __lowerCAmelCase : Union[str, Any] = False __lowerCAmelCase : Tuple = False if self.does_advance(_SCREAMING_SNAKE_CASE ): self.fulfilled_idx += 1 __lowerCAmelCase : Tuple = True if self.fulfilled_idx == (self.seqlen - 1): __lowerCAmelCase : Dict = True __lowerCAmelCase : Tuple = completed else: # failed to make progress. __lowerCAmelCase : Any = True self.reset() return stepped, completed, reset def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = False __lowerCAmelCase : Tuple = 0 def __lowerCamelCase ( self ): return self.seqlen - (self.fulfilled_idx + 1) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : Optional[Any] = PhrasalConstraint(self.token_ids ) if stateful: __lowerCAmelCase : int = self.seqlen __lowerCAmelCase : List[Any] = self.fulfilled_idx __lowerCAmelCase : List[Any] = self.completed return new_constraint class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ): __lowerCAmelCase : str = max([len(_SCREAMING_SNAKE_CASE ) for one in nested_token_ids] ) __lowerCAmelCase : str = {} for token_ids in nested_token_ids: __lowerCAmelCase : List[Any] = root for tidx, token_id in enumerate(_SCREAMING_SNAKE_CASE ): if token_id not in level: __lowerCAmelCase : Union[str, Any] = {} __lowerCAmelCase : Dict = level[token_id] if no_subsets and self.has_subsets(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError( 'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is' f" {nested_token_ids}." 
) __lowerCAmelCase : List[str] = root def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = self.trie for current_token in current_seq: __lowerCAmelCase : int = start[current_token] __lowerCAmelCase : Any = list(start.keys() ) return next_tokens def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = self.next_tokens(_SCREAMING_SNAKE_CASE ) return len(_SCREAMING_SNAKE_CASE ) == 0 def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = list(root.values() ) if len(_SCREAMING_SNAKE_CASE ) == 0: return 1 else: return sum([self.count_leaves(_SCREAMING_SNAKE_CASE ) for nn in next_nodes] ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = self.count_leaves(_SCREAMING_SNAKE_CASE ) return len(_SCREAMING_SNAKE_CASE ) != leaf_count class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE ): super(_SCREAMING_SNAKE_CASE , self ).__init__() if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or len(_SCREAMING_SNAKE_CASE ) == 0: raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." ) if any(not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) for token_ids in nested_token_ids ): raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." ) if any( any((not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." ) __lowerCAmelCase : int = DisjunctiveTrie(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = nested_token_ids __lowerCAmelCase : Any = self.trie.max_height __lowerCAmelCase : Union[str, Any] = [] __lowerCAmelCase : Tuple = False def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = self.trie.next_tokens(self.current_seq ) if len(_SCREAMING_SNAKE_CASE ) == 0: return None else: return token_list def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(_SCREAMING_SNAKE_CASE )}" ) __lowerCAmelCase : List[str] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(_SCREAMING_SNAKE_CASE )}" ) __lowerCAmelCase : Tuple = False __lowerCAmelCase : List[str] = False __lowerCAmelCase : List[str] = False if self.does_advance(_SCREAMING_SNAKE_CASE ): self.current_seq.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = True else: __lowerCAmelCase : Optional[int] = True self.reset() __lowerCAmelCase : Dict = self.trie.reached_leaf(self.current_seq ) __lowerCAmelCase : Union[str, Any] = completed return stepped, completed, reset def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = False __lowerCAmelCase : List[str] = [] def __lowerCamelCase ( self ): if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : Any = DisjunctiveConstraint(self.token_ids ) if stateful: __lowerCAmelCase : 
Optional[Any] = self.seqlen __lowerCAmelCase : int = self.current_seq __lowerCAmelCase : Union[str, Any] = self.completed return new_constraint class A__ : def __init__( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[str] = constraints # max # of steps required to fulfill a given constraint __lowerCAmelCase : Dict = max([c.seqlen for c in constraints] ) __lowerCAmelCase : Optional[Any] = len(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = False self.init_state() def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = [] __lowerCAmelCase : Tuple = None __lowerCAmelCase : Any = [constraint.copy(stateful=_SCREAMING_SNAKE_CASE ) for constraint in self.constraints] def __lowerCamelCase ( self ): __lowerCAmelCase : Any = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def __lowerCamelCase ( self ): __lowerCAmelCase : int = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __lowerCAmelCase : Union[str, Any] = constraint.advance() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): token_list.append(_SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): token_list.extend(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : int = self.inprogress_constraint.advance() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): token_list.append(_SCREAMING_SNAKE_CASE ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): token_list.extend(_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) == 0: return None else: return token_list def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.add(_SCREAMING_SNAKE_CASE ) # the entire list of constraints are fulfilled if self.completed: break def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`." ) __lowerCAmelCase , __lowerCAmelCase : List[str] = False, False if self.completed: __lowerCAmelCase : Dict = True __lowerCAmelCase : Optional[int] = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Tuple = self.inprogress_constraint.update(_SCREAMING_SNAKE_CASE ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : List[str] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. 
self.complete_constraints.append(self.inprogress_constraint ) __lowerCAmelCase : Dict = None if len(self.pending_constraints ) == 0: # we're done! __lowerCAmelCase : Optional[int] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = pending_constraint.update(_SCREAMING_SNAKE_CASE ) if not stepped: raise Exception( '`constraint.update(token_id)` is not yielding incremental progress, ' 'even though `constraint.does_advance(token_id)` is true.' ) if complete: self.complete_constraints.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = None if not complete and stepped: __lowerCAmelCase : Any = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __lowerCAmelCase : str = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __lowerCAmelCase : Optional[Any] = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=True ): __lowerCAmelCase : Union[str, Any] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: __lowerCAmelCase : Tuple = [ constraint.copy(stateful=_SCREAMING_SNAKE_CASE ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __lowerCAmelCase : List[Any] = self.inprogress_constraint.copy(stateful=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = [constraint.copy() for constraint in self.pending_constraints] return new_state
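# Hedged usage sketch: the constraint classes above back constrained beam
# search via transformers' model.generate(constraints=...). Model choice and
# the forced phrase are illustrative.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

forced = tokenizer("Sie", add_special_tokens=False).input_ids
constraint = PhrasalConstraint(forced)  # output must contain this token sequence

inputs = tokenizer("translate English to German: How old are you?", return_tensors="pt")
outputs = model.generate(**inputs, constraints=[constraint], num_beams=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))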
"""simple docstring""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""", """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""", """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } lowerCamelCase__ = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): for attribute in key.split('.' ): __lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase ) if weight_type is not None: __lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape else: __lowerCAmelCase : Dict = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "weight_g": __lowerCAmelCase : List[Any] = value elif weight_type == "weight_v": __lowerCAmelCase : Any = value elif weight_type == "bias": __lowerCAmelCase : List[str] = value else: __lowerCAmelCase : List[Any] = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Any = [] __lowerCAmelCase : Optional[int] = fairseq_model.state_dict() __lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , ) __lowerCAmelCase : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: __lowerCAmelCase : int = True if "*" in mapped_key: __lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2] __lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase ) if "weight_g" in name: __lowerCAmelCase : Union[str, Any] = 'weight_g' elif "weight_v" in name: __lowerCAmelCase : int = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __lowerCAmelCase : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase : List[str] = 'weight' else: __lowerCAmelCase : Optional[Any] = None set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1] __lowerCAmelCase : Any = name.split('.' ) __lowerCAmelCase : List[Any] = int(items[0] ) __lowerCAmelCase : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowerCAmelCase : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowerCAmelCase : int = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __lowerCAmelCase : Optional[Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __lowerCAmelCase : Any = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ): # load the pre-trained checkpoints __lowerCAmelCase : Any = torch.load(_UpperCamelCase ) __lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] ) __lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase ) else: __lowerCAmelCase : List[str] = WavLMConfig() __lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase ) recursively_load_weights(_UpperCamelCase , _UpperCamelCase ) hf_wavlm.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
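# Hedged usage sketch (paths are hypothetical; the script requires the unilm
# checkout described in the header comments):
# python convert_wavlm_original_pytorch_checkpoint_to_pytorch.py \
#     --checkpoint_path ./WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base \
#     --config_path ./config.json
from transformers import WavLMModel

model = WavLMModel.from_pretrained("./wavlm-base")  # verify the dump loads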
"""simple docstring""" from collections import OrderedDict from ...utils import logging from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update from .configuration_auto import CONFIG_MAPPING_NAMES lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = OrderedDict( [ # Base model mapping ("""albert""", """FlaxAlbertModel"""), ("""bart""", """FlaxBartModel"""), ("""beit""", """FlaxBeitModel"""), ("""bert""", """FlaxBertModel"""), ("""big_bird""", """FlaxBigBirdModel"""), ("""blenderbot""", """FlaxBlenderbotModel"""), ("""blenderbot-small""", """FlaxBlenderbotSmallModel"""), ("""clip""", """FlaxCLIPModel"""), ("""distilbert""", """FlaxDistilBertModel"""), ("""electra""", """FlaxElectraModel"""), ("""gpt-sw3""", """FlaxGPT2Model"""), ("""gpt2""", """FlaxGPT2Model"""), ("""gpt_neo""", """FlaxGPTNeoModel"""), ("""gptj""", """FlaxGPTJModel"""), ("""longt5""", """FlaxLongT5Model"""), ("""marian""", """FlaxMarianModel"""), ("""mbart""", """FlaxMBartModel"""), ("""mt5""", """FlaxMT5Model"""), ("""opt""", """FlaxOPTModel"""), ("""pegasus""", """FlaxPegasusModel"""), ("""regnet""", """FlaxRegNetModel"""), ("""resnet""", """FlaxResNetModel"""), ("""roberta""", """FlaxRobertaModel"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormModel"""), ("""roformer""", """FlaxRoFormerModel"""), ("""t5""", """FlaxT5Model"""), ("""vision-text-dual-encoder""", """FlaxVisionTextDualEncoderModel"""), ("""vit""", """FlaxViTModel"""), ("""wav2vec2""", """FlaxWav2Vec2Model"""), ("""whisper""", """FlaxWhisperModel"""), ("""xglm""", """FlaxXGLMModel"""), ("""xlm-roberta""", """FlaxXLMRobertaModel"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for pre-training mapping ("""albert""", """FlaxAlbertForPreTraining"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForPreTraining"""), ("""big_bird""", """FlaxBigBirdForPreTraining"""), ("""electra""", """FlaxElectraForPreTraining"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ("""wav2vec2""", """FlaxWav2Vec2ForPreTraining"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for Masked LM mapping ("""albert""", """FlaxAlbertForMaskedLM"""), ("""bart""", """FlaxBartForConditionalGeneration"""), ("""bert""", """FlaxBertForMaskedLM"""), ("""big_bird""", """FlaxBigBirdForMaskedLM"""), ("""distilbert""", """FlaxDistilBertForMaskedLM"""), ("""electra""", """FlaxElectraForMaskedLM"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""roberta""", """FlaxRobertaForMaskedLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMaskedLM"""), ("""roformer""", """FlaxRoFormerForMaskedLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForMaskedLM"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for Seq2Seq Causal LM mapping ("""bart""", """FlaxBartForConditionalGeneration"""), ("""blenderbot""", """FlaxBlenderbotForConditionalGeneration"""), ("""blenderbot-small""", """FlaxBlenderbotSmallForConditionalGeneration"""), ("""encoder-decoder""", """FlaxEncoderDecoderModel"""), ("""longt5""", """FlaxLongT5ForConditionalGeneration"""), ("""marian""", 
"""FlaxMarianMTModel"""), ("""mbart""", """FlaxMBartForConditionalGeneration"""), ("""mt5""", """FlaxMT5ForConditionalGeneration"""), ("""pegasus""", """FlaxPegasusForConditionalGeneration"""), ("""t5""", """FlaxT5ForConditionalGeneration"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for Image-classsification ("""beit""", """FlaxBeitForImageClassification"""), ("""regnet""", """FlaxRegNetForImageClassification"""), ("""resnet""", """FlaxResNetForImageClassification"""), ("""vit""", """FlaxViTForImageClassification"""), ] ) lowerCamelCase__ = OrderedDict( [ ("""vision-encoder-decoder""", """FlaxVisionEncoderDecoderModel"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for Causal LM mapping ("""bart""", """FlaxBartForCausalLM"""), ("""bert""", """FlaxBertForCausalLM"""), ("""big_bird""", """FlaxBigBirdForCausalLM"""), ("""electra""", """FlaxElectraForCausalLM"""), ("""gpt-sw3""", """FlaxGPT2LMHeadModel"""), ("""gpt2""", """FlaxGPT2LMHeadModel"""), ("""gpt_neo""", """FlaxGPTNeoForCausalLM"""), ("""gptj""", """FlaxGPTJForCausalLM"""), ("""opt""", """FlaxOPTForCausalLM"""), ("""roberta""", """FlaxRobertaForCausalLM"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForCausalLM"""), ("""xglm""", """FlaxXGLMForCausalLM"""), ("""xlm-roberta""", """FlaxXLMRobertaForCausalLM"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for Sequence Classification mapping ("""albert""", """FlaxAlbertForSequenceClassification"""), ("""bart""", """FlaxBartForSequenceClassification"""), ("""bert""", """FlaxBertForSequenceClassification"""), ("""big_bird""", """FlaxBigBirdForSequenceClassification"""), ("""distilbert""", """FlaxDistilBertForSequenceClassification"""), ("""electra""", """FlaxElectraForSequenceClassification"""), ("""mbart""", """FlaxMBartForSequenceClassification"""), ("""roberta""", """FlaxRobertaForSequenceClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForSequenceClassification"""), ("""roformer""", """FlaxRoFormerForSequenceClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForSequenceClassification"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for Question Answering mapping ("""albert""", """FlaxAlbertForQuestionAnswering"""), ("""bart""", """FlaxBartForQuestionAnswering"""), ("""bert""", """FlaxBertForQuestionAnswering"""), ("""big_bird""", """FlaxBigBirdForQuestionAnswering"""), ("""distilbert""", """FlaxDistilBertForQuestionAnswering"""), ("""electra""", """FlaxElectraForQuestionAnswering"""), ("""mbart""", """FlaxMBartForQuestionAnswering"""), ("""roberta""", """FlaxRobertaForQuestionAnswering"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForQuestionAnswering"""), ("""roformer""", """FlaxRoFormerForQuestionAnswering"""), ("""xlm-roberta""", """FlaxXLMRobertaForQuestionAnswering"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for Token Classification mapping ("""albert""", """FlaxAlbertForTokenClassification"""), ("""bert""", """FlaxBertForTokenClassification"""), ("""big_bird""", """FlaxBigBirdForTokenClassification"""), ("""distilbert""", """FlaxDistilBertForTokenClassification"""), ("""electra""", """FlaxElectraForTokenClassification"""), ("""roberta""", """FlaxRobertaForTokenClassification"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForTokenClassification"""), ("""roformer""", """FlaxRoFormerForTokenClassification"""), ("""xlm-roberta""", """FlaxXLMRobertaForTokenClassification"""), ] ) lowerCamelCase__ = OrderedDict( [ # Model for Multiple Choice mapping ("""albert""", 
"""FlaxAlbertForMultipleChoice"""), ("""bert""", """FlaxBertForMultipleChoice"""), ("""big_bird""", """FlaxBigBirdForMultipleChoice"""), ("""distilbert""", """FlaxDistilBertForMultipleChoice"""), ("""electra""", """FlaxElectraForMultipleChoice"""), ("""roberta""", """FlaxRobertaForMultipleChoice"""), ("""roberta-prelayernorm""", """FlaxRobertaPreLayerNormForMultipleChoice"""), ("""roformer""", """FlaxRoFormerForMultipleChoice"""), ("""xlm-roberta""", """FlaxXLMRobertaForMultipleChoice"""), ] ) lowerCamelCase__ = OrderedDict( [ ("""bert""", """FlaxBertForNextSentencePrediction"""), ] ) lowerCamelCase__ = OrderedDict( [ ("""speech-encoder-decoder""", """FlaxSpeechEncoderDecoderModel"""), ("""whisper""", """FlaxWhisperForConditionalGeneration"""), ] ) lowerCamelCase__ = OrderedDict( [ ("""whisper""", """FlaxWhisperForAudioClassification"""), ] ) lowerCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES) lowerCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES) lowerCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES ) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES) lowerCamelCase__ = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES ) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES ) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES ) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES ) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES ) lowerCamelCase__ = _LazyAutoMapping( CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES ) class A__ ( _BaseAutoModelClass): A_ : Dict = FLAX_MODEL_MAPPING lowerCamelCase__ = auto_class_update(FlaxAutoModel) class A__ ( _BaseAutoModelClass): A_ : str = FLAX_MODEL_FOR_PRETRAINING_MAPPING lowerCamelCase__ = auto_class_update(FlaxAutoModelForPreTraining, head_doc="""pretraining""") class A__ ( _BaseAutoModelClass): A_ : List[str] = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING lowerCamelCase__ = auto_class_update(FlaxAutoModelForCausalLM, head_doc="""causal language modeling""") class A__ ( _BaseAutoModelClass): A_ : Tuple = FLAX_MODEL_FOR_MASKED_LM_MAPPING lowerCamelCase__ = auto_class_update(FlaxAutoModelForMaskedLM, head_doc="""masked language modeling""") class A__ ( _BaseAutoModelClass): A_ : Tuple = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING lowerCamelCase__ = auto_class_update( FlaxAutoModelForSeqaSeqLM, head_doc="""sequence-to-sequence language modeling""", checkpoint_for_example="""t5-base""" ) class A__ ( _BaseAutoModelClass): A_ : Any = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING lowerCamelCase__ = auto_class_update( FlaxAutoModelForSequenceClassification, head_doc="""sequence classification""" ) class A__ ( _BaseAutoModelClass): A_ : List[str] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING 
lowerCamelCase__ = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc="""question answering""") class A__ ( _BaseAutoModelClass): A_ : Any = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING lowerCamelCase__ = auto_class_update( FlaxAutoModelForTokenClassification, head_doc="""token classification""" ) class A__ ( _BaseAutoModelClass): A_ : List[Any] = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING lowerCamelCase__ = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc="""multiple choice""") class A__ ( _BaseAutoModelClass): A_ : Union[str, Any] = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING lowerCamelCase__ = auto_class_update( FlaxAutoModelForNextSentencePrediction, head_doc="""next sentence prediction""" ) class A__ ( _BaseAutoModelClass): A_ : Any = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase__ = auto_class_update( FlaxAutoModelForImageClassification, head_doc="""image classification""" ) class A__ ( _BaseAutoModelClass): A_ : Union[str, Any] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING lowerCamelCase__ = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc="""vision-to-text modeling""") class A__ ( _BaseAutoModelClass): A_ : str = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING lowerCamelCase__ = auto_class_update( FlaxAutoModelForSpeechSeqaSeq, head_doc="""sequence-to-sequence speech-to-text modeling""" )
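# Hedged usage sketch: the lazy mappings above let the Flax auto classes
# resolve a checkpoint's config to the matching architecture (requires flax):
from transformers import AutoTokenizer, FlaxAutoModel

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
model = FlaxAutoModel.from_pretrained("bert-base-uncased")  # resolves to FlaxBertModel
outputs = model(**tokenizer("hello world", return_tensors="np"))
print(outputs.last_hidden_state.shape)  # (1, seq_len, 768)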
"""simple docstring""" import os import pytest from attr import dataclass lowerCamelCase__ = """us-east-1""" # defaults region @dataclass class A__ : A_ : str A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' A_ : Optional[int] = { 'task_name': 'mnli', 'per_device_train_batch_size': 1_6, 'per_device_eval_batch_size': 1_6, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_0_0, 'save_steps': 5_5_0_0, } A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0} @property def __lowerCamelCase ( self ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowerCamelCase ( self ): return f"{self.framework}-transfromers-test" @property def __lowerCamelCase ( self ): return f"./tests/sagemaker/scripts/{self.framework}" @property def __lowerCamelCase ( self ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework )
"""simple docstring""" import baseaa import io import json import os from copy import deepcopy from ..optimizer import AcceleratedOptimizer from ..scheduler import AcceleratedScheduler class A__ : def __init__( self , _SCREAMING_SNAKE_CASE ): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): # Don't modify user's data should they want to reuse it (e.g. in tests), because once we # modified it, it will not be accepted here again, since `auto` values would have been overridden __lowerCAmelCase : List[str] = deepcopy(_SCREAMING_SNAKE_CASE ) elif os.path.exists(_SCREAMING_SNAKE_CASE ): with io.open(_SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' ) as f: __lowerCAmelCase : Tuple = json.load(_SCREAMING_SNAKE_CASE ) else: try: __lowerCAmelCase : Any = baseaa.urlsafe_baadecode(_SCREAMING_SNAKE_CASE ).decode('utf-8' ) __lowerCAmelCase : int = json.loads(_SCREAMING_SNAKE_CASE ) except (UnicodeDecodeError, AttributeError, ValueError): raise ValueError( f"Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}" ) __lowerCAmelCase : Tuple = config self.set_stage_and_offload() def __lowerCamelCase ( self ): # zero stage - this is done as early as possible, before model is created, to allow # ``is_deepspeed_zero3_enabled`` query and getting to the early deepspeed config object # during ``zero.Init()`` which needs to know the dtype, and some other hparams. __lowerCAmelCase : List[Any] = self.get_value('zero_optimization.stage' , -1 ) # offload __lowerCAmelCase : int = False if self.is_zeroa() or self.is_zeroa(): __lowerCAmelCase : List[Any] = set(['cpu', 'nvme'] ) __lowerCAmelCase : Tuple = set( [ self.get_value('zero_optimization.offload_optimizer.device' ), self.get_value('zero_optimization.offload_param.device' ), ] ) if len(offload_devices & offload_devices_valid ) > 0: __lowerCAmelCase : List[str] = True def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = self.config # find the config node of interest if it exists __lowerCAmelCase : List[str] = ds_key_long.split('.' ) __lowerCAmelCase : List[str] = nodes.pop() for node in nodes: __lowerCAmelCase : Union[str, Any] = config.get(_SCREAMING_SNAKE_CASE ) if config is None: return None, ds_key return config, ds_key def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ): __lowerCAmelCase , __lowerCAmelCase : Any = self.find_config_node(_SCREAMING_SNAKE_CASE ) if config is None: return default return config.get(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : Tuple = self.config # find the config node of interest if it exists __lowerCAmelCase : List[Any] = ds_key_long.split('.' 
) for node in nodes: __lowerCAmelCase : Tuple = config __lowerCAmelCase : Optional[int] = config.get(_SCREAMING_SNAKE_CASE ) if config is None: if must_exist: raise ValueError(f"Can't find {ds_key_long} entry in the config: {self.config}" ) else: return # if found remove it if parent_config is not None: parent_config.pop(_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = self.get_value(_SCREAMING_SNAKE_CASE ) return False if value is None else bool(_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = self.get_value(_SCREAMING_SNAKE_CASE ) return False if value is None else not bool(_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): return self._stage == 2 def __lowerCamelCase ( self ): return self._stage == 3 def __lowerCamelCase ( self ): return self._offload class A__ : def __init__( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = engine def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # runs backpropagation and handles mixed precision self.engine.backward(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) # Deepspeed's `engine.step` performs the following operations: # - gradient accumulation check # - gradient clipping # - optimizer step # - zero grad # - checking overflow # - lr_scheduler step (only if engine.lr_scheduler is not None) self.engine.step() # and this plugin overrides the above calls with no-ops when Accelerate runs under # Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple # training loop that works transparently under many training regimes. class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE ): super().__init__(_SCREAMING_SNAKE_CASE , device_placement=_SCREAMING_SNAKE_CASE , scaler=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = hasattr(self.optimizer , 'overflow' ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed def __lowerCamelCase ( self ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed @property def __lowerCamelCase ( self ): if self.__has_overflow__: return self.optimizer.overflow return False class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = params __lowerCAmelCase : Dict = lr __lowerCAmelCase : List[Any] = weight_decay __lowerCAmelCase : str = kwargs class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=0 , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = optimizer __lowerCAmelCase : int = total_num_steps __lowerCAmelCase : str = warmup_num_steps __lowerCAmelCase : List[str] = kwargs
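# Hedged, self-contained sketch of the dotted-path lookup the config wrapper
# above implements (the helper name and config values are illustrative):
def get_value(config: dict, ds_key_long: str, default=None):
    # walk "a.b.c" one dict level at a time, falling back to `default`
    *nodes, ds_key = ds_key_long.split(".")
    for node in nodes:
        config = config.get(node)
        if config is None:
            return default
    return config.get(ds_key, default)


ds_config = {"zero_optimization": {"stage": 3, "offload_param": {"device": "cpu"}}}
assert get_value(ds_config, "zero_optimization.stage") == 3
assert get_value(ds_config, "zero_optimization.offload_optimizer.device", "none") == "none"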
"""simple docstring""" from __future__ import annotations lowerCamelCase__ = list[tuple[int, int]] lowerCamelCase__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = pos_x __lowerCAmelCase : Optional[Any] = pos_y __lowerCAmelCase : Optional[int] = (pos_y, pos_x) __lowerCAmelCase : Union[str, Any] = goal_x __lowerCAmelCase : Any = goal_y __lowerCAmelCase : Optional[Any] = g_cost __lowerCAmelCase : Any = parent __lowerCAmelCase : Union[str, Any] = self.calculate_heuristic() def __lowerCamelCase ( self ): __lowerCAmelCase : str = abs(self.pos_x - self.goal_x ) __lowerCAmelCase : str = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , _SCREAMING_SNAKE_CASE ): return self.f_cost < other.f_cost class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = [self.start] __lowerCAmelCase : list[Node] = [] __lowerCAmelCase : str = False def __lowerCamelCase ( self ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() __lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: __lowerCAmelCase : Union[str, Any] = True return self.retrace_path(_SCREAMING_SNAKE_CASE ) self.closed_nodes.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: # retrieve the best current path __lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = [] for action in delta: __lowerCAmelCase : Optional[int] = parent.pos_x + action[1] __lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) ) return successors def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = node __lowerCAmelCase : Optional[int] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __lowerCAmelCase : int = current_node.parent path.reverse() return path if __name__ == "__main__": lowerCamelCase__ = (0, 0) lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("""------""") lowerCamelCase__ = GreedyBestFirst(init, goal) lowerCamelCase__ = 
greedy_bf.search() if path: for pos_x, pos_y in path: lowerCamelCase__ = 2 for elem in grid: print(elem)
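# Quick sanity check of the ordering used by search(): greedy best-first
# expands by heuristic alone (f = h), unlike A*, which ranks nodes by g + h.
a = Node(0, 0, 6, 6, 0, None)  # Manhattan distance to goal: 12
b = Node(3, 3, 6, 6, 0, None)  # Manhattan distance to goal: 6
assert b < a  # the node estimated closer to the goal sorts first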
"""simple docstring""" import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): # Initialise PyTorch model __lowerCAmelCase : Optional[int] = BertConfig.from_json_file(_UpperCamelCase ) print(F"Building PyTorch model from configuration: {config}" ) __lowerCAmelCase : Union[str, Any] = BertForPreTraining(_UpperCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_bert(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() , _UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--bert_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained BERT model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) lowerCamelCase__ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float(moles / volume ) * nfactor ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((moles * 0.0821 * temperature) / (volume) ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations from typing import Any def __lowerCAmelCase (_UpperCamelCase ): create_state_space_tree(_UpperCamelCase , [] , 0 ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if index == len(_UpperCamelCase ): print(_UpperCamelCase ) return create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 ) current_subsequence.append(sequence[index] ) create_state_space_tree(_UpperCamelCase , _UpperCamelCase , index + 1 ) current_subsequence.pop() if __name__ == "__main__": lowerCamelCase__ = [3, 1, 2, 4] generate_all_subsequences(seq) seq.clear() seq.extend(["""A""", """B""", """C"""]) generate_all_subsequences(seq)
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A__ ( enum.Enum): A_ : List[Any] = 0 A_ : Dict = 1 A_ : Union[str, Any] = 2 @add_end_docstrings(_lowerCamelCase) class A__ ( _lowerCamelCase): A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowerCAmelCase : Any = None if self.model.config.prefix is not None: __lowerCAmelCase : str = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowerCAmelCase : Tuple = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params ) __lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params} __lowerCAmelCase : List[str] = {**self._forward_params, **forward_params} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Optional[int] = {} if prefix is not None: __lowerCAmelCase : Union[str, Any] = prefix if prefix: __lowerCAmelCase : Dict = self.tokenizer( _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ' [None, \'hole\']' ) __lowerCAmelCase : int = handle_long_generation preprocess_params.update(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = generate_kwargs __lowerCAmelCase : List[Any] = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : List[Any] = ReturnType.TENSORS if return_type is not None: __lowerCAmelCase : Optional[Any] = return_type if clean_up_tokenization_spaces is not None: __lowerCAmelCase : Tuple = clean_up_tokenization_spaces if stop_sequence is not None: __lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowerCAmelCase : Optional[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = self.tokenizer( prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : Optional[Any] = prompt_text if handle_long_generation == "hole": __lowerCAmelCase : str = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens'] else: __lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:] return inputs def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = model_inputs['input_ids'] __lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE ) # Allow empty prompts if input_ids.shape[1] == 0: __lowerCAmelCase : Dict = None __lowerCAmelCase : str = None __lowerCAmelCase : Tuple = 1 else: __lowerCAmelCase : Any = input_ids.shape[0] __lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = generated_sequence.shape[0] if self.framework == "pt": __lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ): __lowerCAmelCase : Any = model_outputs['generated_sequence'][0] __lowerCAmelCase : Tuple = model_outputs['input_ids'] __lowerCAmelCase : Any = model_outputs['prompt_text'] __lowerCAmelCase : int = generated_sequence.numpy().tolist() __lowerCAmelCase : Union[str, Any] = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowerCAmelCase : int = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowerCAmelCase : Any = self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowerCAmelCase : Optional[Any] = 0 else: __lowerCAmelCase : Any = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) ) if return_type == ReturnType.FULL_TEXT: __lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:] else: __lowerCAmelCase : int = text[prompt_length:] __lowerCAmelCase : Dict = {'generated_text': all_text} records.append(_SCREAMING_SNAKE_CASE ) return records
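# A minimal standalone sketch of the prompt-stripping logic used in `postprocess`
# above: the fully decoded sequence is sliced at the decoded length of the prompt,
# so FULL_TEXT re-attaches the original prompt while NEW_TEXT drops it. The
# `fake_decode` helper and token lists below are hypothetical stand-ins for a real
# tokenizer, used only for illustration.
def fake_decode(token_ids):
    # stand-in for tokenizer.decode: one character per token id
    return "".join(chr(i) for i in token_ids)


prompt_ids = [72, 105]             # decodes to "Hi"
generated_ids = [72, 105, 33, 33]  # decodes to "Hi!!"
prompt_text = "Hi"

text = fake_decode(generated_ids)
prompt_length = len(fake_decode(prompt_ids))
full_text = prompt_text + text[prompt_length:]  # ReturnType.FULL_TEXT
new_text = text[prompt_length:]                 # ReturnType.NEW_TEXT
assert full_text == "Hi!!" and new_text == "!!"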
86
1
"""simple docstring""" import argparse import json from collections import OrderedDict from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SegformerConfig, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=False ): __lowerCAmelCase : Any = OrderedDict() for key, value in state_dict.items(): if encoder_only and not key.startswith('head' ): __lowerCAmelCase : Dict = 'segformer.encoder.' + key if key.startswith('backbone' ): __lowerCAmelCase : Optional[int] = key.replace('backbone' , 'segformer.encoder' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 __lowerCAmelCase : str = key[key.find('patch_embed' ) + len('patch_embed' )] __lowerCAmelCase : Any = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(_UpperCamelCase )-1}" ) if "norm" in key: __lowerCAmelCase : List[str] = key.replace('norm' , 'layer_norm' ) if "segformer.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 __lowerCAmelCase : str = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )] __lowerCAmelCase : Optional[Any] = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(_UpperCamelCase )-1}" ) if "layer_norm1" in key: __lowerCAmelCase : Any = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: __lowerCAmelCase : str = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 __lowerCAmelCase : Union[str, Any] = key[key.find('block' ) + len('block' )] __lowerCAmelCase : Dict = key.replace(F"block{idx}" , F"block.{int(_UpperCamelCase )-1}" ) if "attn.q" in key: __lowerCAmelCase : int = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: __lowerCAmelCase : Dict = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: __lowerCAmelCase : Dict = key.replace('attn' , 'attention.self' ) if "fc1" in key: __lowerCAmelCase : Tuple = key.replace('fc1' , 'dense1' ) if "fc2" in key: __lowerCAmelCase : List[str] = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: __lowerCAmelCase : int = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: __lowerCAmelCase : List[Any] = key.replace('linear_fuse.conv' , 'linear_fuse' ) __lowerCAmelCase : List[Any] = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 __lowerCAmelCase : List[Any] = key[key.find('linear_c' ) + len('linear_c' )] __lowerCAmelCase : Tuple = key.replace(F"linear_c{idx}" , F"linear_c.{int(_UpperCamelCase )-1}" ) if key.startswith('head' ): __lowerCAmelCase : Tuple = key.replace('head' , 'classifier' ) __lowerCAmelCase : Union[str, Any] = value return new_state_dict def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) __lowerCAmelCase : Union[str, Any] = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" ) __lowerCAmelCase : Optional[int] = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" ) # next, add keys and values (in that order) to 
the state dict __lowerCAmelCase : str = kv_weight[ : config.hidden_sizes[i], : ] __lowerCAmelCase : Tuple = kv_bias[: config.hidden_sizes[i]] __lowerCAmelCase : Union[str, Any] = kv_weight[ config.hidden_sizes[i] :, : ] __lowerCAmelCase : Tuple = kv_bias[ config.hidden_sizes[i] : ] def __lowerCAmelCase (): __lowerCAmelCase : Optional[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCAmelCase : List[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return image @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Dict = SegformerConfig() __lowerCAmelCase : List[Any] = False # set attributes based on model_name __lowerCAmelCase : int = 'huggingface/label-files' if "segformer" in model_name: __lowerCAmelCase : List[Any] = model_name[len('segformer.' ) : len('segformer.' ) + 2] if "ade" in model_name: __lowerCAmelCase : str = 150 __lowerCAmelCase : int = 'ade20k-id2label.json' __lowerCAmelCase : Union[str, Any] = (1, 150, 128, 128) elif "city" in model_name: __lowerCAmelCase : Any = 19 __lowerCAmelCase : Any = 'cityscapes-id2label.json' __lowerCAmelCase : Dict = (1, 19, 128, 128) else: raise ValueError(F"Model {model_name} not supported" ) elif "mit" in model_name: __lowerCAmelCase : str = True __lowerCAmelCase : Optional[Any] = model_name[4:6] __lowerCAmelCase : Any = 1000 __lowerCAmelCase : Dict = 'imagenet-1k-id2label.json' __lowerCAmelCase : Optional[int] = (1, 1000) else: raise ValueError(F"Model {model_name} not supported" ) # set config attributes __lowerCAmelCase : Optional[Any] = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) ) __lowerCAmelCase : str = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __lowerCAmelCase : List[Any] = idalabel __lowerCAmelCase : str = {v: k for k, v in idalabel.items()} if size == "b0": pass elif size == "b1": __lowerCAmelCase : int = [64, 128, 320, 512] __lowerCAmelCase : Optional[Any] = 256 elif size == "b2": __lowerCAmelCase : Optional[Any] = [64, 128, 320, 512] __lowerCAmelCase : List[Any] = 768 __lowerCAmelCase : Union[str, Any] = [3, 4, 6, 3] elif size == "b3": __lowerCAmelCase : Optional[int] = [64, 128, 320, 512] __lowerCAmelCase : int = 768 __lowerCAmelCase : Tuple = [3, 4, 18, 3] elif size == "b4": __lowerCAmelCase : Any = [64, 128, 320, 512] __lowerCAmelCase : int = 768 __lowerCAmelCase : Any = [3, 8, 27, 3] elif size == "b5": __lowerCAmelCase : List[str] = [64, 128, 320, 512] __lowerCAmelCase : Optional[Any] = 768 __lowerCAmelCase : Tuple = [3, 6, 40, 3] else: raise ValueError(F"Size {size} not supported" ) # load image processor (only resize + normalize) __lowerCAmelCase : Tuple = SegformerImageProcessor( image_scale=(512, 512) , keep_ratio=_UpperCamelCase , align=_UpperCamelCase , do_random_crop=_UpperCamelCase ) # prepare image __lowerCAmelCase : Any = prepare_img() __lowerCAmelCase : Optional[Any] = image_processor(images=_UpperCamelCase , return_tensors='pt' ).pixel_values logger.info(F"Converting model {model_name}..." 
) # load original state dict if encoder_only: __lowerCAmelCase : Optional[Any] = torch.load(_UpperCamelCase , map_location=torch.device('cpu' ) ) else: __lowerCAmelCase : str = torch.load(_UpperCamelCase , map_location=torch.device('cpu' ) )['state_dict'] # rename keys __lowerCAmelCase : List[Any] = rename_keys(_UpperCamelCase , encoder_only=_UpperCamelCase ) if not encoder_only: del state_dict["decode_head.conv_seg.weight"] del state_dict["decode_head.conv_seg.bias"] # key and value matrices need special treatment read_in_k_v(_UpperCamelCase , _UpperCamelCase ) # create HuggingFace model and load state dict if encoder_only: __lowerCAmelCase : List[Any] = False __lowerCAmelCase : int = SegformerForImageClassification(_UpperCamelCase ) else: __lowerCAmelCase : Dict = SegformerForSemanticSegmentation(_UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) model.eval() # forward pass __lowerCAmelCase : int = model(_UpperCamelCase ) __lowerCAmelCase : Optional[Any] = outputs.logits # set expected_slice based on model name # ADE20k checkpoints if model_name == "segformer.b0.512x512.ade.160k": __lowerCAmelCase : List[str] = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ) elif model_name == "segformer.b1.512x512.ade.160k": __lowerCAmelCase : List[str] = torch.tensor( [ [[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]], [[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]], [[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]], ] ) elif model_name == "segformer.b2.512x512.ade.160k": __lowerCAmelCase : Optional[int] = torch.tensor( [ [[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]], [[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]], [[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]], ] ) elif model_name == "segformer.b3.512x512.ade.160k": __lowerCAmelCase : Dict = torch.tensor( [ [[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]], [[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]], [[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]], ] ) elif model_name == "segformer.b4.512x512.ade.160k": __lowerCAmelCase : Dict = torch.tensor( [ [[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]], [[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]], [[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]], ] ) elif model_name == "segformer.b5.640x640.ade.160k": __lowerCAmelCase : Union[str, Any] = torch.tensor( [ [[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]], [[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]], [[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]], ] ) # Cityscapes checkpoints elif model_name == "segformer.b0.1024x1024.city.160k": __lowerCAmelCase : int = torch.tensor( [ [[-11.9295, 
-13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]], [[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]], [[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]], ] ) elif model_name == "segformer.b0.512x1024.city.160k": __lowerCAmelCase : Dict = torch.tensor( [ [[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]], [[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]], [[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]], ] ) elif model_name == "segformer.b0.640x1280.city.160k": __lowerCAmelCase : Optional[int] = torch.tensor( [ [ [-1.1_3_7_2e0_1, -1.2_7_8_7e0_1, -1.3_4_7_7e0_1], [-1.2_5_3_6e0_1, -1.4_1_9_4e0_1, -1.4_4_0_9e0_1], [-1.3_2_1_7e0_1, -1.4_8_8_8e0_1, -1.5_3_2_7e0_1], ], [ [-1.4_7_9_1e0_1, -1.7_1_2_2e0_1, -1.8_2_7_7e0_1], [-1.7_1_6_3e0_1, -1.9_1_9_2e0_1, -1.9_5_3_3e0_1], [-1.7_8_9_7e0_1, -1.9_9_9_1e0_1, -2.0_3_1_5e0_1], ], [ [7.6_7_2_3e-0_1, 4.1_9_2_1e-0_1, -7.7_8_7_8e-0_2], [4.7_7_7_2e-0_1, 9.5_5_5_7e-0_3, -2.8_0_8_2e-0_1], [3.6_0_3_2e-0_1, -2.4_8_2_6e-0_1, -5.1_1_6_8e-0_1], ], ] ) elif model_name == "segformer.b0.768x768.city.160k": __lowerCAmelCase : Optional[Any] = torch.tensor( [ [[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]], [[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]], [[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]], ] ) elif model_name == "segformer.b1.1024x1024.city.160k": __lowerCAmelCase : Dict = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ) elif model_name == "segformer.b2.1024x1024.city.160k": __lowerCAmelCase : str = torch.tensor( [ [[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]], [[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]], [[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]], ] ) elif model_name == "segformer.b3.1024x1024.city.160k": __lowerCAmelCase : Tuple = torch.tensor( [ [[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]], [[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]], [[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]], ] ) elif model_name == "segformer.b4.1024x1024.city.160k": __lowerCAmelCase : Any = torch.tensor( [ [[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]], [[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]], [[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]], ] ) elif model_name == "segformer.b5.1024x1024.city.160k": __lowerCAmelCase : Any = torch.tensor( [ [[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]], [[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]], [[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]], ] ) else: 
__lowerCAmelCase : Dict = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) # verify logits if not encoder_only: assert logits.shape == expected_shape assert torch.allclose(logits[0, :3, :3, :3] , _UpperCamelCase , atol=1e-2 ) # finally, save model and image processor logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) image_processor.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument( """--model_name""", default="""segformer.b0.512x512.ade.160k""", type=str, help="""Name of the model you'd like to convert.""", ) parser.add_argument( """--checkpoint_path""", default=None, type=str, help="""Path to the original PyTorch checkpoint (.pth file).""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model.""" ) lowerCamelCase__ = parser.parse_args() convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
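# Example invocation of the conversion script above (a sketch: the script filename
# and the local checkpoint path are hypothetical, not verified locations):
#
#   python convert_segformer_original_to_pytorch.py \
#       --model_name segformer.b0.512x512.ade.160k \
#       --checkpoint_path ./segformer.b0.512x512.ade.160k.pth \
#       --pytorch_dump_folder_path ./segformer-b0-ade
#
# The block-renaming rule from `rename_keys` can also be checked in isolation:
idx = "1"  # block index as it appears in the original key, e.g. "block1"
key = "block1.0.attn.q.weight"
renamed = key.replace(f"block{idx}", f"block.{int(idx) - 1}")
assert renamed == "block.0.0.attn.q.weight"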
86
"""simple docstring""" from __future__ import annotations import bisect def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): if hi < 0: __lowerCAmelCase : Tuple = len(_UpperCamelCase ) while lo < hi: __lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __lowerCAmelCase : int = mid + 1 else: __lowerCAmelCase : List[str] = mid return lo def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): if hi < 0: __lowerCAmelCase : List[Any] = len(_UpperCamelCase ) while lo < hi: __lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __lowerCAmelCase : Dict = mid + 1 else: __lowerCAmelCase : str = mid return lo def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = 0 __lowerCAmelCase : int = len(_UpperCamelCase ) - 1 while left <= right: __lowerCAmelCase : List[Any] = left + (right - left) // 2 __lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __lowerCAmelCase : Tuple = midpoint - 1 else: __lowerCAmelCase : str = midpoint + 1 return None def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase ) if index != len(_UpperCamelCase ) and sorted_collection[index] == item: return index return None def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if right < left: return None __lowerCAmelCase : List[str] = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 ) else: return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip() lowerCamelCase__ = sorted(int(item) for item in user_input.split(""",""")) lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n""")) lowerCamelCase__ = binary_search(collection, target) if result is None: print(f'{target} was not found in {collection}.') else: print(f'{target} was found at position {result} in {collection}.')
86
1
"""simple docstring""" from __future__ import annotations class A__ : def __init__( self , _SCREAMING_SNAKE_CASE=None ): __lowerCAmelCase : Dict = data __lowerCAmelCase : Union[str, Any] = None def __repr__( self ): __lowerCAmelCase : List[Any] = [] __lowerCAmelCase : Union[str, Any] = self while temp: string_rep.append(f"{temp.data}" ) __lowerCAmelCase : Union[str, Any] = temp.next return "->".join(_SCREAMING_SNAKE_CASE ) def __lowerCAmelCase (_UpperCamelCase ): if not elements_list: raise Exception('The Elements List is empty' ) __lowerCAmelCase : str = Node(elements_list[0] ) for i in range(1 , len(_UpperCamelCase ) ): __lowerCAmelCase : Optional[Any] = Node(elements_list[i] ) __lowerCAmelCase : Dict = current.next return head def __lowerCAmelCase (_UpperCamelCase ): if head_node is not None and isinstance(_UpperCamelCase , _UpperCamelCase ): print_reverse(head_node.next ) print(head_node.data ) def __lowerCAmelCase (): from doctest import testmod testmod() __lowerCAmelCase : str = make_linked_list([14, 52, 14, 12, 43] ) print('Linked List:' ) print(_UpperCamelCase ) print('Elements in Reverse:' ) print_reverse(_UpperCamelCase ) if __name__ == "__main__": main()
86
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[str] = 
TFAutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , 
output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
86
1
"""simple docstring""" import random from .binary_exp_mod import bin_exp_mod def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=1000 ): if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd __lowerCAmelCase : Tuple = n - 1 __lowerCAmelCase : Dict = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) __lowerCAmelCase : List[str] = 0 while count < prec: __lowerCAmelCase : List[Any] = random.randint(2 , n - 1 ) __lowerCAmelCase : Tuple = bin_exp_mod(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if b != 1: __lowerCAmelCase : Dict = True for _ in range(_UpperCamelCase ): if b == n - 1: __lowerCAmelCase : Dict = False break __lowerCAmelCase : Optional[Any] = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": lowerCamelCase__ = abs(int(input("""Enter bound : """).strip())) print("""Here's the list of primes:""") print(""", """.join(str(i) for i in range(n + 1) if is_prime_big(i)))
86
"""simple docstring""" from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCamelCase__ = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\") >>> pipe.to(\"cuda\") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save(\"cat.png\") ``` """ def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ): __lowerCAmelCase : Dict = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 __lowerCAmelCase : List[str] = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): super().__init__() self.register_modules( text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if latents is None: __lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) __lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma return latents def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1 # get prompt text embeddings __lowerCAmelCase : Dict = self.tokenizer( _SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) __lowerCAmelCase : Tuple = text_inputs.input_ids __lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): 
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) __lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder( input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase : List[str] if negative_prompt is None: __lowerCAmelCase : Union[str, Any] = [''] * batch_size elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !=" f" {type(_SCREAMING_SNAKE_CASE )}." ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" ' the batch size of `prompt`.' ) else: __lowerCAmelCase : Optional[int] = negative_prompt __lowerCAmelCase : Tuple = self.tokenizer( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) __lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder( input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1] __lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1] __lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 ) __lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 ) __lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] ) __lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) __lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) __lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" ) __lowerCAmelCase : List[Any] = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ): if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) __lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowerCAmelCase : Any = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: __lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE ) if self.safety_checker is not None: __lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE ) # We'll offload the last model manually. 
__lowerCAmelCase : Optional[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowerCamelCase ( self ): if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[str] = 1 elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" ) __lowerCAmelCase : Dict = self._execution_device __lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt __lowerCAmelCase : Optional[int] = guidance_scale > 1.0 __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE ) self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = self.scheduler.timesteps __lowerCAmelCase : int = self.unet.config.in_channels __lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor ) # create initial latent __lowerCAmelCase : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , ) for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance __lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds} __lowerCAmelCase : Optional[Any] = self.unet( sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0] if do_classifier_free_guidance: __lowerCAmelCase , __lowerCAmelCase : 
Dict = noise_pred.split(latents.shape[1] , dim=1 ) __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 ) __lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 ) __lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCAmelCase : List[str] = self.scheduler.step( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample # post-processing __lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: __lowerCAmelCase : List[str] = image * 0.5 + 0.5 __lowerCAmelCase : Dict = image.clamp(0 , 1 ) __lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
86
1
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers lowerCamelCase__ = """python tqdm regex requests packaging filelock numpy tokenizers""".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("""dataclasses""") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("""importlib_metadata""") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(f'can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py') def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase=None ): require_version(deps[pkg] , _UpperCamelCase )
86
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Union[str, Any] = BarthezTokenizer A_ : Tuple = BarthezTokenizerFast A_ : Dict = True A_ : List[str] = True def __lowerCamelCase ( self ): super().setUp() __lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = tokenizer def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = '<pad>' __lowerCAmelCase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 ) def __lowerCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 ) @require_torch def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2] __lowerCAmelCase : Optional[int] = self.tokenizer( _SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) __lowerCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): if not self.test_rust_tokenizer: return __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.' 
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # fmt: off __lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. __lowerCAmelCase : Union[str, Any] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
86
1
"""simple docstring""" import math def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Tuple = [] __lowerCAmelCase : Dict = 2 __lowerCAmelCase : Any = int(math.sqrt(_UpperCamelCase ) ) # Size of every segment __lowerCAmelCase : Tuple = [True] * (end + 1) __lowerCAmelCase : Any = [] while start <= end: if temp[start] is True: in_prime.append(_UpperCamelCase ) for i in range(start * start , end + 1 , _UpperCamelCase ): __lowerCAmelCase : int = False start += 1 prime += in_prime __lowerCAmelCase : Union[str, Any] = end + 1 __lowerCAmelCase : Tuple = min(2 * end , _UpperCamelCase ) while low <= n: __lowerCAmelCase : List[str] = [True] * (high - low + 1) for each in in_prime: __lowerCAmelCase : int = math.floor(low / each ) * each if t < low: t += each for j in range(_UpperCamelCase , high + 1 , _UpperCamelCase ): __lowerCAmelCase : Any = False for j in range(len(_UpperCamelCase ) ): if temp[j] is True: prime.append(j + low ) __lowerCAmelCase : Tuple = high + 1 __lowerCAmelCase : int = min(high + end , _UpperCamelCase ) return prime print(sieve(10**6))
86
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class A__ ( _lowerCamelCase): A_ : Optional[int] = 'poolformer' def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = num_channels __lowerCAmelCase : str = patch_size __lowerCAmelCase : Optional[Any] = stride __lowerCAmelCase : Optional[int] = padding __lowerCAmelCase : List[Any] = pool_size __lowerCAmelCase : int = hidden_sizes __lowerCAmelCase : str = mlp_ratio __lowerCAmelCase : Optional[int] = depths __lowerCAmelCase : str = patch_sizes __lowerCAmelCase : str = strides __lowerCAmelCase : Optional[int] = num_encoder_blocks __lowerCAmelCase : Any = drop_path_rate __lowerCAmelCase : Any = hidden_act __lowerCAmelCase : Dict = use_layer_scale __lowerCAmelCase : Union[str, Any] = layer_scale_init_value __lowerCAmelCase : Dict = initializer_range super().__init__(**_SCREAMING_SNAKE_CASE ) class A__ ( _lowerCamelCase): A_ : List[str] = version.parse('1.11') @property def __lowerCamelCase ( self ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowerCamelCase ( self ): return 2E-3
86
1
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A__ ( enum.Enum): A_ : List[Any] = 0 A_ : Dict = 1 A_ : Union[str, Any] = 2 @add_end_docstrings(_lowerCamelCase) class A__ ( _lowerCamelCase): A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowerCAmelCase : Any = None if self.model.config.prefix is not None: __lowerCAmelCase : str = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowerCAmelCase : Tuple = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params ) __lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params} __lowerCAmelCase : List[str] = {**self._forward_params, **forward_params} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Optional[int] = {} if prefix is not None: __lowerCAmelCase : Union[str, Any] = prefix if prefix: __lowerCAmelCase : Dict = self.tokenizer( _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ' [None, \'hole\']' ) __lowerCAmelCase : int = handle_long_generation preprocess_params.update(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = generate_kwargs __lowerCAmelCase : List[Any] = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : List[Any] = ReturnType.TENSORS if return_type is not None: __lowerCAmelCase : Optional[Any] = return_type if clean_up_tokenization_spaces is not None: __lowerCAmelCase : Tuple = clean_up_tokenization_spaces if stop_sequence is not None: __lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowerCAmelCase : Optional[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = self.tokenizer( prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : Optional[Any] = prompt_text if handle_long_generation == "hole": __lowerCAmelCase : str = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens'] else: __lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:] return inputs def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = model_inputs['input_ids'] __lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE ) # Allow empty prompts if input_ids.shape[1] == 0: __lowerCAmelCase : Dict = None __lowerCAmelCase : str = None __lowerCAmelCase : Tuple = 1 else: __lowerCAmelCase : Any = input_ids.shape[0] __lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = generated_sequence.shape[0] if self.framework == "pt": __lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ): __lowerCAmelCase : Any = model_outputs['generated_sequence'][0] __lowerCAmelCase : Tuple = model_outputs['input_ids'] __lowerCAmelCase : Any = model_outputs['prompt_text'] __lowerCAmelCase : int = generated_sequence.numpy().tolist() __lowerCAmelCase : Union[str, Any] = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowerCAmelCase : int = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowerCAmelCase : Any = self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowerCAmelCase : Optional[Any] = 0 else: __lowerCAmelCase : Any = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) ) if return_type == ReturnType.FULL_TEXT: __lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:] else: __lowerCAmelCase : int = text[prompt_length:] __lowerCAmelCase : Dict = {'generated_text': all_text} records.append(_SCREAMING_SNAKE_CASE ) return records
86
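# A hedged sketch of driving the text-generation pipeline defined above
# through the public `pipeline` factory; `gpt2` is an illustrative checkpoint,
# not one the record pins down. `return_full_text=False` exercises the
# ReturnType.NEW_TEXT branch of the postprocess step.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
outputs = generator(
    "In 1991, the remains of Russian Tsar",  # prompt
    max_new_tokens=20,
    return_full_text=False,  # return only the newly generated continuation
)
print(outputs[0]["generated_text"])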
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Union[str, Any] = DiTPipeline A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS A_ : List[Any] = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS A_ : Tuple = False def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : List[str] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : str = AutoencoderKL() __lowerCAmelCase : Union[str, Any] = DDIMScheduler() __lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ): if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = 'cpu' __lowerCAmelCase : Any = self.get_dummy_components() __lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images __lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) __lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) __lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 ) def __lowerCamelCase ( self ): self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCamelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = torch.manual_seed(0 ) __lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) 
pipe.to('cuda' ) __lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf'] __lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCamelCase ( self ): __lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) __lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) __lowerCAmelCase : Dict = ['vase', 'umbrella'] __lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max() ) < 1E-1
86
1
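# Standalone sketch of the slow-test flow above, assuming a CUDA device and
# the `facebook/DiT-XL-2-256` weights are available locally or via the Hub.
import torch
from diffusers import DiTPipeline

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.to("cuda")
class_ids = pipe.get_label_ids(["white shark"])  # ImageNet label name -> class id
generator = torch.manual_seed(0)
image = pipe(class_ids, generator=generator, num_inference_steps=25).images[0]
image.save("white_shark.png")  # default output_type is PIL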
"""simple docstring""" import functools from typing import Any def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): # Validation if not isinstance(_UpperCamelCase , _UpperCamelCase ) or len(_UpperCamelCase ) == 0: raise ValueError('the string should be not empty string' ) if not isinstance(_UpperCamelCase , _UpperCamelCase ) or not all( isinstance(_UpperCamelCase , _UpperCamelCase ) and len(_UpperCamelCase ) > 0 for item in words ): raise ValueError('the words should be a list of non-empty strings' ) # Build trie __lowerCAmelCase : dict[str, Any] = {} __lowerCAmelCase : Optional[Any] = 'WORD_KEEPER' for word in words: __lowerCAmelCase : Optional[int] = trie for c in word: if c not in trie_node: __lowerCAmelCase : Optional[int] = {} __lowerCAmelCase : Any = trie_node[c] __lowerCAmelCase : Any = True __lowerCAmelCase : List[str] = len(_UpperCamelCase ) # Dynamic programming method @functools.cache def is_breakable(_UpperCamelCase ) -> bool: if index == len_string: return True __lowerCAmelCase : Union[str, Any] = trie for i in range(_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : str = trie_node.get(string[i] , _UpperCamelCase ) if trie_node is None: return False if trie_node.get(_UpperCamelCase , _UpperCamelCase ) and is_breakable(i + 1 ): return True return False return is_breakable(0 ) if __name__ == "__main__": import doctest doctest.testmod()
86
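# An unobfuscated sketch of the same trie + memoized-DP word-break check as
# the record above; the names `word_break`, `trie`, and `breakable` are
# illustrative rather than taken from the source.
import functools


def word_break(string: str, words: list[str]) -> bool:
    trie: dict = {}
    for word in words:  # build a character trie over the dictionary
        node = trie
        for c in word:
            node = node.setdefault(c, {})
        node["WORD_KEEPER"] = True  # marks the end of a complete word

    @functools.cache
    def breakable(index: int) -> bool:  # can string[index:] be segmented?
        if index == len(string):
            return True
        node = trie
        for i in range(index, len(string)):
            node = node.get(string[i])
            if node is None:
                return False
            if node.get("WORD_KEEPER") and breakable(i + 1):
                return True
        return False

    return breakable(0)


assert word_break("applepenapple", ["apple", "pen"])
assert not word_break("catsandog", ["cats", "dog", "sand", "and", "cat"])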
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline from diffusers.pipelines.shap_e import ShapERenderer from diffusers.utils import floats_tensor, load_image, load_numpy, slow from diffusers.utils.testing_utils import require_torch_gpu, torch_device from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference class A__ ( _lowerCamelCase , unittest.TestCase): A_ : str = ShapEImgaImgPipeline A_ : str = ['image'] A_ : int = ['image'] A_ : Tuple = [ 'num_images_per_prompt', 'num_inference_steps', 'generator', 'latents', 'guidance_scale', 'frame_size', 'output_type', 'return_dict', ] A_ : Tuple = False @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return self.time_input_dim * 4 @property def __lowerCamelCase ( self ): return 8 @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Any = CLIPVisionConfig( hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , ) __lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): __lowerCAmelCase : Any = CLIPImageProcessor( crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , ) return image_processor @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = { 'num_attention_heads': 2, 'attention_head_dim': 16, 'embedding_dim': self.time_input_dim, 'num_embeddings': 32, 'embedding_proj_dim': self.text_embedder_hidden_size, 'time_embed_dim': self.time_embed_dim, 'num_layers': 1, 'clip_embed_dim': self.time_input_dim * 2, 'additional_embeddings': 0, 'time_embed_act_fn': 'gelu', 'norm_in_type': 'layer', 'embedding_proj_norm_type': 'layer', 'encoder_hid_proj_type': None, 'added_emb_type': None, } __lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Dict = { 'param_shapes': ( (self.renderer_dim, 93), (self.renderer_dim, 8), (self.renderer_dim, 8), (self.renderer_dim, 8), ), 'd_latent': self.time_input_dim, 'd_hidden': self.renderer_dim, 'n_output': 12, 'background': ( 0.1, 0.1, 0.1, ), } __lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE ) return model def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.dummy_prior __lowerCAmelCase : List[Any] = self.dummy_image_encoder __lowerCAmelCase : int = self.dummy_image_processor __lowerCAmelCase : Any = self.dummy_renderer __lowerCAmelCase : Any = HeunDiscreteScheduler( beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , ) __lowerCAmelCase : Tuple = { 'prior': prior, 'image_encoder': image_encoder, 'image_processor': image_processor, 'renderer': renderer, 'scheduler': scheduler, } return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE=0 ): __lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = { 'image': input_image, 'generator': generator, 'num_inference_steps': 1, 'frame_size': 32, 'output_type': 'np', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : str = 'cpu' __lowerCAmelCase : Dict = self.get_dummy_components() __lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Any = output.images[0] __lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (20, 32, 32, 3) __lowerCAmelCase : List[Any] = np.array( [ 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, 0.0003_9216, ] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def __lowerCamelCase ( self ): # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches self._test_inference_batch_consistent(batch_sizes=[1, 2] ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = torch_device == 'cpu' __lowerCAmelCase : Optional[Any] = True self._test_inference_batch_single_identical( batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.get_dummy_components() __lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : List[str] = 2 __lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) for key in inputs.keys(): if key in self.batch_params: __lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]] __lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0] assert images.shape[0] == batch_size * num_images_per_prompt @slow @require_torch_gpu class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : int = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' ) __lowerCAmelCase : Any = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/test_shap_e_img2img_out.npy' ) __lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' ) __lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 ) __lowerCAmelCase : int = pipe( _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0] assert images.shape == (20, 64, 64, 3) 
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
86
1
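# Hedged end-to-end sketch of the slow Shap-E image-to-3D test above,
# assuming GPU access and the `openai/shap-e-img2img` weights.
import torch
from diffusers import ShapEImg2ImgPipeline
from diffusers.utils import load_image

pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img").to("cuda")
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/shap_e/corgi.png"
)
generator = torch.Generator(device="cuda").manual_seed(0)
frames = pipe(
    image,
    generator=generator,
    guidance_scale=3.0,
    num_inference_steps=64,
    frame_size=64,
    output_type="np",
).images[0]
print(frames.shape)  # (20, 64, 64, 3): 20 rendered views of the 3D asset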
"""simple docstring""" import datasets from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py lowerCamelCase__ = """\ @INPROCEEDINGS{Papineni02bleu:a, author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu}, title = {BLEU: a Method for Automatic Evaluation of Machine Translation}, booktitle = {}, year = {2002}, pages = {311--318} } @inproceedings{lin-och-2004-orange, title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\", author = \"Lin, Chin-Yew and Och, Franz Josef\", booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\", month = \"aug 23{--}aug 27\", year = \"2004\", address = \"Geneva, Switzerland\", publisher = \"COLING\", url = \"https://www.aclweb.org/anthology/C04-1072\", pages = \"501--507\", } """ lowerCamelCase__ = """\ BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another. Quality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation, the better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and remains one of the most popular automated and inexpensive metrics. Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations. Those scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness are not taken into account[citation needed]. BLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1 representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional reference translations will increase the BLEU score. """ lowerCamelCase__ = """ Computes BLEU score of translated segments against one or more references. Args: predictions: list of translations to score. Each translation should be tokenized into a list of tokens. references: list of lists of references for each translation. Each reference should be tokenized into a list of tokens. max_order: Maximum n-gram order to use when computing BLEU score. smooth: Whether or not to apply Lin et al. 2004 smoothing. Returns: 'bleu': bleu score, 'precisions': geometric mean of n-gram precisions, 'brevity_penalty': brevity penalty, 'length_ratio': ratio of lengths, 'translation_length': translation_length, 'reference_length': reference_length Examples: >>> predictions = [ ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample ... ] >>> references = [ ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references) ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference) ... 
] >>> bleu = datasets.load_metric(\"bleu\") >>> results = bleu.compute(predictions=predictions, references=references) >>> print(results[\"bleu\"]) 1.0 """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class A__ ( datasets.Metric): def __lowerCamelCase ( self ): return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ), 'references': datasets.Sequence( datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ), } ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[ 'https://en.wikipedia.org/wiki/BLEU', 'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213', ] , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : Optional[Any] = compute_bleu( reference_corpus=_SCREAMING_SNAKE_CASE , translation_corpus=_SCREAMING_SNAKE_CASE , max_order=_SCREAMING_SNAKE_CASE , smooth=_SCREAMING_SNAKE_CASE ) ((__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase) , (__lowerCAmelCase)) : Dict = score return { "bleu": bleu, "precisions": precisions, "brevity_penalty": bp, "length_ratio": ratio, "translation_length": translation_length, "reference_length": reference_length, }
86
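# A small arithmetic sketch of how the fields returned above combine into the
# final score: BLEU is the geometric mean of the n-gram precisions scaled by
# the brevity penalty. The precision values here are purely illustrative.
import math

precisions = [0.75, 0.5, 0.4, 0.25]  # 1- to 4-gram modified precisions
translation_length, reference_length = 9, 10
ratio = translation_length / reference_length
bp = 1.0 if ratio > 1.0 else math.exp(1 - 1 / ratio)  # brevity penalty
bleu = bp * math.exp(sum(math.log(p) for p in precisions) / len(precisions))
print(round(bleu, 4))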
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""FNetTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""FNetTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FNET_PRETRAINED_MODEL_ARCHIVE_LIST""", """FNetForMaskedLM""", """FNetForMultipleChoice""", """FNetForNextSentencePrediction""", """FNetForPreTraining""", """FNetForQuestionAnswering""", """FNetForSequenceClassification""", """FNetForTokenClassification""", """FNetLayer""", """FNetModel""", """FNetPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
86
1
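# A toy sketch of the lazy-module pattern this __init__ relies on: symbol
# names are registered eagerly, but the submodule import is deferred to the
# first attribute access. This mirrors the idea behind `_LazyModule`, not its
# exact API.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        if item not in self._class_to_module:
            raise AttributeError(item)
        submodule = importlib.import_module(
            "." + self._class_to_module[item], self.__name__
        )  # the real import happens only here, on first access
        return getattr(submodule, item)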
"""simple docstring""" from ..utils import DummyObject, requires_backends class A__ ( metaclass=_lowerCamelCase): A_ : Any = ['onnx'] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): requires_backends(self , ['onnx'] ) @classmethod def __lowerCamelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): requires_backends(cls , ['onnx'] ) @classmethod def __lowerCamelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): requires_backends(cls , ['onnx'] )
86
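# A hedged sketch of what this dummy-object pattern buys you: when the `onnx`
# backend is missing, touching the placeholder raises a readable ImportError
# instead of a NameError at import time. `requires_backends` is the utility
# the record calls; this toy reproduces its effect directly, and the class
# name below is illustrative.
class DummyObject(type):
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the onnx library.")


class PlaceholderOnnxModel(metaclass=DummyObject):
    def __init__(self, *args, **kwargs):
        raise ImportError(f"{type(self).__name__} requires the onnx library.")


try:
    PlaceholderOnnxModel()
except ImportError as err:
    print(err)  # PlaceholderOnnxModel requires the onnx library.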
"""simple docstring""" import math import sys def __lowerCAmelCase (_UpperCamelCase ): if number != int(_UpperCamelCase ): raise ValueError('the value of input must be a natural number' ) if number < 0: raise ValueError('the value of input must not be a negative number' ) if number == 0: return 1 __lowerCAmelCase : Any = [-1] * (number + 1) __lowerCAmelCase : List[Any] = 0 for i in range(1 , number + 1 ): __lowerCAmelCase : List[Any] = sys.maxsize __lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) ) for j in range(1 , root + 1 ): __lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)] __lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase : List[str] = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
86
1
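# The record is a bottom-up DP for the minimum number of perfect squares
# summing to `number` (Lagrange's theorem guarantees at most four). A compact
# sketch of the same recurrence, answers[i] = 1 + min(answers[i - j*j]):
import math


def min_squares(number: int) -> int:
    answers = [0] * (number + 1)
    for i in range(1, number + 1):
        answers[i] = 1 + min(
            answers[i - j * j] for j in range(1, int(math.sqrt(i)) + 1)
        )
    return answers[number]


assert min_squares(12) == 3  # 4 + 4 + 4
assert min_squares(13) == 2  # 4 + 9
# Note: the record returns 1 for number == 0, counting 0 = 0**2 as one square.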
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class A__ ( _lowerCamelCase): A_ : torch.FloatTensor A_ : torch.FloatTensor A_ : Optional[torch.FloatTensor] = None class A__ ( _lowerCamelCase , _lowerCamelCase): A_ : Any = 2 @register_to_config def __init__( self , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 1.007 , _SCREAMING_SNAKE_CASE = 80 , _SCREAMING_SNAKE_CASE = 0.05 , _SCREAMING_SNAKE_CASE = 50 , ): # standard deviation of the initial noise distribution __lowerCAmelCase : Optional[int] = sigma_max # setable values __lowerCAmelCase : int = None __lowerCAmelCase : np.IntTensor = None __lowerCAmelCase : torch.FloatTensor = None # sigma(t_i) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): return sample def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): __lowerCAmelCase : List[Any] = num_inference_steps __lowerCAmelCase : int = np.arange(0 , self.num_inference_steps )[::-1].copy() __lowerCAmelCase : Optional[int] = torch.from_numpy(_SCREAMING_SNAKE_CASE ).to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] __lowerCAmelCase : List[str] = torch.tensor(_SCREAMING_SNAKE_CASE , dtype=torch.floataa , device=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): if self.config.s_min <= sigma <= self.config.s_max: __lowerCAmelCase : Dict = min(self.config.s_churn / self.num_inference_steps , 2**0.5 - 1 ) else: __lowerCAmelCase : Dict = 0 # sample eps ~ N(0, S_noise^2 * I) __lowerCAmelCase : int = self.config.s_noise * randn_tensor(sample.shape , generator=_SCREAMING_SNAKE_CASE ).to(sample.device ) __lowerCAmelCase : List[str] = sigma + gamma * sigma __lowerCAmelCase : List[str] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ): __lowerCAmelCase : int = sample_hat + sigma_hat * model_output __lowerCAmelCase : List[Any] = (sample_hat - pred_original_sample) / sigma_hat __lowerCAmelCase : int = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=_SCREAMING_SNAKE_CASE , derivative=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ): __lowerCAmelCase : int = sample_prev + sigma_prev * model_output __lowerCAmelCase : int = (sample_prev - pred_original_sample) / sigma_prev __lowerCAmelCase : Optional[Any] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=_SCREAMING_SNAKE_CASE , derivative=_SCREAMING_SNAKE_CASE , pred_original_sample=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , 
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): raise NotImplementedError()
86
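# A numpy sketch of the stochastic "churn" step implemented above: sigma is
# inflated to sigma_hat and matching noise is injected, and the scheduler only
# applies it while sigma sits inside [s_min, s_max]. Values are illustrative.
import numpy as np

rng = np.random.default_rng(0)
sigma, s_churn, s_noise, num_inference_steps = 40.0, 80.0, 1.007, 50

gamma = min(s_churn / num_inference_steps, 2**0.5 - 1)  # per-step churn factor
sigma_hat = sigma + gamma * sigma
sample = rng.standard_normal(4) * sigma  # stand-in for the current latents
eps = s_noise * rng.standard_normal(sample.shape)  # eps ~ N(0, s_noise^2 * I)
sample_hat = sample + (sigma_hat**2 - sigma**2) ** 0.5 * eps
print(sigma_hat, sample_hat)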
"""simple docstring""" import tempfile import unittest import numpy as np import transformers from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel if is_torch_available(): import torch class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ): __lowerCAmelCase : Union[str, Any] = parent __lowerCAmelCase : Any = batch_size __lowerCAmelCase : Any = seq_length __lowerCAmelCase : Optional[Any] = is_training __lowerCAmelCase : Any = use_input_mask __lowerCAmelCase : Any = use_token_type_ids __lowerCAmelCase : Tuple = use_labels __lowerCAmelCase : Optional[Any] = vocab_size __lowerCAmelCase : Tuple = hidden_size __lowerCAmelCase : str = rotary_dim __lowerCAmelCase : Union[str, Any] = num_hidden_layers __lowerCAmelCase : Union[str, Any] = num_attention_heads __lowerCAmelCase : int = intermediate_size __lowerCAmelCase : List[str] = hidden_act __lowerCAmelCase : int = hidden_dropout_prob __lowerCAmelCase : Any = attention_probs_dropout_prob __lowerCAmelCase : List[Any] = max_position_embeddings __lowerCAmelCase : Optional[Any] = initializer_range __lowerCAmelCase : Tuple = None __lowerCAmelCase : int = vocab_size - 1 __lowerCAmelCase : Dict = vocab_size - 1 __lowerCAmelCase : int = vocab_size - 1 def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : List[str] = None if self.use_input_mask: __lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Optional[int] = GPTJConfig( vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , ) return (config, input_ids, input_mask) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs() __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs __lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask} return config, inputs_dict def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[str] = 20 __lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , 
_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' ) __lowerCAmelCase : Optional[Any] = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCAmelCase : Any = model( input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCAmelCase : int = model( input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = 20 __lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = jnp.concatenate( [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , ) __lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = jnp.broadcast_to( jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) ) __lowerCAmelCase : Optional[Any] = model( input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' ) __lowerCAmelCase : Tuple = model( input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" ) @require_flax class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else () A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else () def __lowerCamelCase ( self ): __lowerCAmelCase : int = FlaxGPTJModelTester(self ) def __lowerCamelCase ( self ): for model_class_name in self.all_model_classes: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): for model_class_name in self.all_model_classes: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.check_use_cache_forward_with_attn_mask( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @tooslow def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' ) __lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , 
return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCAmelCase : Any = False __lowerCAmelCase : Any = model.config.eos_token_id __lowerCAmelCase : Union[str, Any] = jax.jit(model.generate ) __lowerCAmelCase : Optional[Any] = jit_generate( inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences __lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = [ 'Hello this is a long string of text.\n\nI\'m trying to get the text of the', 'Hey, I\'m a little late to the party. I\'m going to', ] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @is_pt_flax_cross_test def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape __lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = 0 __lowerCAmelCase : Tuple = 1 __lowerCAmelCase : List[str] = 0 __lowerCAmelCase : Any = 1 __lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval() __lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) __lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = fx_state with torch.no_grad(): __lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple() __lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual( len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @is_pt_flax_cross_test def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): # prepare inputs __lowerCAmelCase : List[str] = 
self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class __lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning __lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval() __lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa ) __lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params ) __lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape __lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) ) for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = 0 __lowerCAmelCase : Optional[Any] = 1 __lowerCAmelCase : Optional[int] = 0 __lowerCAmelCase : Optional[Any] = 1 # make sure weights are tied in PyTorch pt_model.tie_weights() with torch.no_grad(): __lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple() __lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE ) with torch.no_grad(): __lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple() self.assertEqual( len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 ) @tooslow def __lowerCamelCase ( self ): for model_class_name in self.all_model_classes: __lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' ) __lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
86
1
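# Hedged sketch of the jitted-generation path the slow test above exercises;
# the 6B checkpoint is large, so treat this as illustrative rather than a
# quick-start. Tokenizer setup mirrors the test (GPT-2 vocab, left padding).
import jax
from transformers import FlaxGPTJForCausalLM, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
inputs = tokenizer(
    ["Hello this is a long string", "Hey"],
    return_tensors="np", padding=True, truncation=True,
)
jit_generate = jax.jit(model.generate)  # compile once, reuse across calls
sequences = jit_generate(
    inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    pad_token_id=tokenizer.pad_token_id,
).sequences
print(tokenizer.batch_decode(sequences, skip_special_tokens=True))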
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): return int((input_a, input_a).count(0 ) != 0 ) def __lowerCAmelCase (): assert nand_gate(0 , 0 ) == 1 assert nand_gate(0 , 1 ) == 1 assert nand_gate(1 , 0 ) == 1 assert nand_gate(1 , 1 ) == 0 if __name__ == "__main__": print(nand_gate(0, 0)) print(nand_gate(0, 1)) print(nand_gate(1, 0)) print(nand_gate(1, 1))
86
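# NAND is functionally complete; a short demonstration building NOT and AND
# out of the gate the record defines (parameter names here are illustrative).
def nand_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) != 0)


def not_gate(input_1: int) -> int:
    return nand_gate(input_1, input_1)


def and_gate(input_1: int, input_2: int) -> int:
    return not_gate(nand_gate(input_1, input_2))


assert [and_gate(a, b) for a, b in ((0, 0), (0, 1), (1, 0), (1, 1))] == [0, 0, 0, 1]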
"""simple docstring""" from __future__ import annotations import os import tempfile import unittest from transformers import ConvBertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertModel, ) class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : Tuple = parent __lowerCAmelCase : Optional[int] = 13 __lowerCAmelCase : List[Any] = 7 __lowerCAmelCase : int = True __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : List[Any] = True __lowerCAmelCase : Optional[int] = True __lowerCAmelCase : Optional[Any] = 99 __lowerCAmelCase : int = 3_84 __lowerCAmelCase : Union[str, Any] = 2 __lowerCAmelCase : Tuple = 4 __lowerCAmelCase : str = 37 __lowerCAmelCase : Any = 'gelu' __lowerCAmelCase : List[str] = 0.1 __lowerCAmelCase : Any = 0.1 __lowerCAmelCase : Union[str, Any] = 5_12 __lowerCAmelCase : int = 16 __lowerCAmelCase : Union[str, Any] = 2 __lowerCAmelCase : int = 0.02 __lowerCAmelCase : Dict = 3 __lowerCAmelCase : Tuple = 4 __lowerCAmelCase : Tuple = 1_28 __lowerCAmelCase : Optional[int] = 2 __lowerCAmelCase : List[str] = 9 __lowerCAmelCase : int = 1 __lowerCAmelCase : int = None def __lowerCamelCase ( self ): __lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Optional[int] = None if self.use_input_mask: __lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : Tuple = None if self.use_token_type_ids: __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Optional[Any] = None __lowerCAmelCase : Dict = None __lowerCAmelCase : Union[str, Any] = None if self.use_labels: __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : Union[str, Any] = ConvBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE 
, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids} __lowerCAmelCase : Tuple = [input_ids, input_mask] __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = self.num_labels __lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = self.num_choices __lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) ) __lowerCAmelCase : Tuple = { 'input_ids': multiple_choice_inputs_ids, 'attention_mask': multiple_choice_input_mask, 'token_type_ids': multiple_choice_token_type_ids, } __lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = self.num_labels __lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def 
__lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = { 'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids, } __lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) : List[str] = config_and_inputs __lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_tf class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : List[str] = ( ( TFConvBertModel, TFConvBertForMaskedLM, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertForMultipleChoice, ) if is_tf_available() else () ) A_ : str = ( { 'feature-extraction': TFConvBertModel, 'fill-mask': TFConvBertForMaskedLM, 'question-answering': TFConvBertForQuestionAnswering, 'text-classification': TFConvBertForSequenceClassification, 'token-classification': TFConvBertForTokenClassification, 'zero-shot': TFConvBertForSequenceClassification, } if is_tf_available() else {} ) A_ : List[Any] = False A_ : str = False A_ : List[Any] = False def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = TFConvBertModelTester(self ) __lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = True if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ): __lowerCAmelCase : int = True __lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , 
self.model_tester.seq_length ) __lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE ) for model_class in self.all_model_classes: __lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' ) __lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE ) if self.is_encoder_decoder: __lowerCAmelCase : List[str] = outputs['encoder_hidden_states'] __lowerCAmelCase : Tuple = outputs['encoder_attentions'] else: __lowerCAmelCase : Optional[int] = outputs['hidden_states'] __lowerCAmelCase : Tuple = outputs['attentions'] self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = getattr( self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertListEqual( list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) @slow def __lowerCamelCase ( self ): __lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common() __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length ) __lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length ) __lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE ) def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) self.assertEqual(out_len % 2 , 0 ) __lowerCAmelCase : Optional[Any] = outputs.decoder_attentions self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , ) def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = [ t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions) ] self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , ) for model_class in self.all_model_classes: __lowerCAmelCase : List[str] = True __lowerCAmelCase : Optional[int] = False __lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = 
model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ) if self.is_encoder_decoder: __lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE ) check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ) # Check that output attentions can also be changed via the config del inputs_dict["output_attentions"] __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ) # Check attention is always last and order is fine __lowerCAmelCase : Dict = True __lowerCAmelCase : Optional[Any] = True __lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) ) self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE ) check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ) @require_tf class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' ) __lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) __lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0] __lowerCAmelCase : Tuple = [1, 6, 7_68] self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = tf.constant( [ [ [-0.0347_5493, -0.468_6034, -0.3063_8832], [0.2263_7248, -0.2698_8646, -0.742_3424], [0.1032_4868, -0.4501_3508, -0.5828_0784], ] ] ) tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
86
1
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Any = [1] __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = 0, 0, 0 __lowerCAmelCase : List[str] = ugly_nums[ia] * 2 __lowerCAmelCase : Optional[Any] = ugly_nums[ia] * 3 __lowerCAmelCase : Any = ugly_nums[ia] * 5 for _ in range(1 , _UpperCamelCase ): __lowerCAmelCase : int = min(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ugly_nums.append(_UpperCamelCase ) if next_num == next_a: ia += 1 __lowerCAmelCase : Optional[Any] = ugly_nums[ia] * 2 if next_num == next_a: ia += 1 __lowerCAmelCase : Any = ugly_nums[ia] * 3 if next_num == next_a: ia += 1 __lowerCAmelCase : int = ugly_nums[ia] * 5 return ugly_nums[-1] if __name__ == "__main__": from doctest import testmod testmod(verbose=True) print(f'{ugly_numbers(200) = }')
86
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetrImageProcessor class A__ ( unittest.TestCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ): # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p __lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33} __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : int = batch_size __lowerCAmelCase : str = num_channels __lowerCAmelCase : Optional[int] = min_resolution __lowerCAmelCase : List[Any] = max_resolution __lowerCAmelCase : Union[str, Any] = do_resize __lowerCAmelCase : Optional[Any] = size __lowerCAmelCase : Dict = do_rescale __lowerCAmelCase : Optional[Any] = rescale_factor __lowerCAmelCase : Any = do_normalize __lowerCAmelCase : List[str] = image_mean __lowerCAmelCase : Union[str, Any] = image_std __lowerCAmelCase : Optional[int] = do_pad def __lowerCamelCase ( self ): return { "do_resize": self.do_resize, "size": self.size, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_pad": self.do_pad, } def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): if not batched: __lowerCAmelCase : str = image_inputs[0] if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ): __lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size else: __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2] if w < h: __lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w ) __lowerCAmelCase : Optional[int] = self.size['shortest_edge'] elif w > h: __lowerCAmelCase : str = self.size['shortest_edge'] __lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h ) else: __lowerCAmelCase : str = self.size['shortest_edge'] __lowerCAmelCase : Optional[Any] = self.size['shortest_edge'] else: __lowerCAmelCase : str = [] for image in image_inputs: __lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0] __lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class A__ ( _lowerCamelCase , unittest.TestCase): A_ : List[str] = DetrImageProcessor if is_vision_available() else None def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = DetrImageProcessingTester(self ) @property def __lowerCamelCase ( self ): return self.image_processor_tester.prepare_image_processor_dict() def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = 
self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) ) self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE ) self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} ) self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): pass def __lowerCamelCase ( self ): # Initialize image_processing __lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image ) # Test not batched input __lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): # Initialize image_processing __lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray ) # Test not batched input __lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, 
self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __lowerCamelCase ( self ): # Initialize image_processing __lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE ) for image in image_inputs: self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor ) # Test not batched input __lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values __lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __lowerCamelCase ( self ): # prepare image and target __lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f: __lowerCAmelCase : Any = json.loads(f.read() ) __lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target} # encode them __lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' ) __lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values __lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes __lowerCAmelCase : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __lowerCAmelCase : Dict = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd __lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels __lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify orig_size __lowerCAmelCase : int = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size __lowerCAmelCase : List[Any] = torch.tensor([8_00, 
10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) @slow def __lowerCamelCase ( self ): # prepare image, target and masks_path __lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f: __lowerCAmelCase : Optional[int] = json.loads(f.read() ) __lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target} __lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' ) # encode them __lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' ) __lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) # verify pixel values __lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] ) self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) # verify area __lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] ) self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) ) # verify boxes __lowerCAmelCase : Optional[int] = torch.Size([6, 4] ) self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] ) self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) ) # verify image_id __lowerCAmelCase : str = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) ) # verify is_crowd __lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) ) # verify class_labels __lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) ) # verify masks __lowerCAmelCase : Dict = 82_28_73 self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE ) # verify orig_size __lowerCAmelCase : str = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) ) # verify size __lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
86
1
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class A__ ( _lowerCamelCase): A_ : Any = ['image_processor', 'tokenizer'] A_ : Optional[Any] = 'AutoImageProcessor' A_ : str = 'AutoTokenizer' def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Any = kwargs.pop('feature_extractor' ) __lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = self.image_processor __lowerCAmelCase : Tuple = False def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: __lowerCAmelCase : Dict = args[0] __lowerCAmelCase : Union[str, Any] = args[1:] if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: __lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is not None: __lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is None: return inputs elif images is None: return encodings else: __lowerCAmelCase : Union[str, Any] = encodings['input_ids'] return inputs def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @contextmanager def __lowerCamelCase ( self ): warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your images inputs, or in a separate call.' 
) __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = self.tokenizer yield __lowerCAmelCase : Optional[int] = self.image_processor __lowerCAmelCase : Optional[Any] = False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ): if added_vocab is None: __lowerCAmelCase : str = self.tokenizer.get_added_vocab() __lowerCAmelCase : List[Any] = {} while tokens: __lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) if start_token is None: break __lowerCAmelCase : Union[str, Any] = start_token.group(1 ) __lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) __lowerCAmelCase : str = start_token.group() if end_token is None: __lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' ) else: __lowerCAmelCase : Optional[Any] = end_token.group() __lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) if content is not None: __lowerCAmelCase : List[str] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node __lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE ) if value: if len(_SCREAMING_SNAKE_CASE ) == 1: __lowerCAmelCase : Tuple = value[0] __lowerCAmelCase : Tuple = value else: # leaf nodes __lowerCAmelCase : Any = [] for leaf in content.split(R'<sep/>' ): __lowerCAmelCase : List[Any] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": __lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens output[key].append(_SCREAMING_SNAKE_CASE ) if len(output[key] ) == 1: __lowerCAmelCase : str = output[key][0] __lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def __lowerCamelCase ( self ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def __lowerCamelCase ( self ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , ) return self.image_processor
86
"""simple docstring""" import numpy as np def __lowerCAmelCase (_UpperCamelCase ): return 1 / (1 + np.exp(-vector )) def __lowerCAmelCase (_UpperCamelCase ): return vector * sigmoid(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
86
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""", """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""", """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""", """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""", # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class A__ ( _lowerCamelCase): A_ : int = 'mobilenet_v2' def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=2_24 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu6" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.8 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=0.001 , _SCREAMING_SNAKE_CASE=2_55 , **_SCREAMING_SNAKE_CASE , ): super().__init__(**_SCREAMING_SNAKE_CASE ) if depth_multiplier <= 0: raise ValueError('depth_multiplier must be greater than zero.' ) __lowerCAmelCase : Union[str, Any] = num_channels __lowerCAmelCase : Optional[Any] = image_size __lowerCAmelCase : List[str] = depth_multiplier __lowerCAmelCase : int = depth_divisible_by __lowerCAmelCase : Union[str, Any] = min_depth __lowerCAmelCase : int = expand_ratio __lowerCAmelCase : Optional[Any] = output_stride __lowerCAmelCase : List[str] = first_layer_is_expansion __lowerCAmelCase : int = finegrained_output __lowerCAmelCase : Dict = hidden_act __lowerCAmelCase : Optional[Any] = tf_padding __lowerCAmelCase : Optional[int] = classifier_dropout_prob __lowerCAmelCase : Tuple = initializer_range __lowerCAmelCase : Tuple = layer_norm_eps __lowerCAmelCase : List[Any] = semantic_loss_ignore_index class A__ ( _lowerCamelCase): A_ : List[str] = version.parse('1.11') @property def __lowerCamelCase ( self ): return OrderedDict([('pixel_values', {0: 'batch'})] ) @property def __lowerCamelCase ( self ): if self.task == "image-classification": return OrderedDict([('logits', {0: 'batch'})] ) else: return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})] ) @property def __lowerCamelCase ( self ): return 1E-4
86
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : Optional[int] = parent __lowerCAmelCase : Union[str, Any] = batch_size __lowerCAmelCase : Dict = seq_length __lowerCAmelCase : Dict = is_training __lowerCAmelCase : List[str] = use_input_mask __lowerCAmelCase : int = use_token_type_ids __lowerCAmelCase : Optional[int] = use_labels __lowerCAmelCase : List[Any] = vocab_size __lowerCAmelCase : Dict = hidden_size __lowerCAmelCase : Tuple = embedding_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : Tuple = num_attention_heads __lowerCAmelCase : Union[str, Any] = intermediate_size __lowerCAmelCase : Optional[Any] = hidden_act __lowerCAmelCase : Optional[int] = hidden_dropout_prob __lowerCAmelCase : Dict = attention_probs_dropout_prob __lowerCAmelCase : Any = max_position_embeddings __lowerCAmelCase : Any = type_vocab_size __lowerCAmelCase : Union[str, Any] = type_sequence_label_size __lowerCAmelCase : List[str] = initializer_range __lowerCAmelCase : str = num_labels __lowerCAmelCase : int = num_choices __lowerCAmelCase : Union[str, Any] = scope def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowerCAmelCase : Optional[int] = None if self.use_input_mask: __lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __lowerCAmelCase : str = None if self.use_token_type_ids: __lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowerCAmelCase : Union[str, Any] = None __lowerCAmelCase : Optional[int] = None __lowerCAmelCase : Union[str, Any] = None if self.use_labels: __lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices ) __lowerCAmelCase : str = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __lowerCamelCase ( self ): return 
MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Dict = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[Any] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = 
MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : List[str] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = self.num_labels __lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = self.num_labels __lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = self.num_choices __lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE ) model.to(_SCREAMING_SNAKE_CASE ) model.eval() __lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __lowerCAmelCase : List[str] = model( _SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs() ( ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ( __lowerCAmelCase ) , ) : Optional[Any] = config_and_inputs __lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase): A_ : str = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() 
else () ) A_ : List[str] = ( { 'feature-extraction': MobileBertModel, 'fill-mask': MobileBertForMaskedLM, 'question-answering': MobileBertForQuestionAnswering, 'text-classification': MobileBertForSequenceClassification, 'token-classification': MobileBertForTokenClassification, 'zero-shot': MobileBertForSequenceClassification, } if is_torch_available() else {} ) A_ : Dict = True def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ): __lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE ) if return_labels: if model_class in get_values(_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE ) return inputs_dict def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = MobileBertModelTester(self ) __lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 ) def __lowerCamelCase ( self ): self.config_tester.run_common_tests() def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE ) def __lowerCAmelCase (_UpperCamelCase ): return torch.tensor( _UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , ) lowerCamelCase__ = 1E-3 @require_torch @require_sentencepiece @require_tokenizers class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] ) with torch.no_grad(): __lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0] __lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) ) self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE ) 
__lowerCAmelCase : Dict = torch.tensor( [ [ [-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05], [-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00], [2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01], ] ] , device=_SCREAMING_SNAKE_CASE , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE __lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) __lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
86
1
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase ): return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print("""Program to check whether a number is a Perfect number or not...""") lowerCamelCase__ = int(input("""Enter number: """).strip()) print(f'{number} is {"" if perfect(number) else "not "}a Perfect Number.')
86
"""simple docstring""" import re import warnings from contextlib import contextmanager from ...processing_utils import ProcessorMixin class A__ ( _lowerCamelCase): A_ : Any = ['image_processor', 'tokenizer'] A_ : Optional[Any] = 'AutoImageProcessor' A_ : str = 'AutoTokenizer' def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( 'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`' ' instead.' , _SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Any = kwargs.pop('feature_extractor' ) __lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError('You need to specify an `image_processor`.' ) if tokenizer is None: raise ValueError('You need to specify a `tokenizer`.' ) super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = self.image_processor __lowerCAmelCase : Tuple = False def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # For backward compatibility if self._in_target_context_manager: return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 0: __lowerCAmelCase : Dict = args[0] __lowerCAmelCase : Union[str, Any] = args[1:] if images is None and text is None: raise ValueError('You need to specify either an `images` or `text` input to process.' ) if images is not None: __lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is not None: __lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) if text is None: return inputs elif images is None: return encodings else: __lowerCAmelCase : Union[str, Any] = encodings['input_ids'] return inputs def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @contextmanager def __lowerCamelCase ( self ): warnings.warn( '`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your ' 'labels by using the argument `text` of the regular `__call__` method (either in the same call as ' 'your images inputs, or in a separate call.' 
) __lowerCAmelCase : Any = True __lowerCAmelCase : Dict = self.tokenizer yield __lowerCAmelCase : Optional[int] = self.image_processor __lowerCAmelCase : Optional[Any] = False def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ): if added_vocab is None: __lowerCAmelCase : str = self.tokenizer.get_added_vocab() __lowerCAmelCase : List[Any] = {} while tokens: __lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) if start_token is None: break __lowerCAmelCase : Union[str, Any] = start_token.group(1 ) __lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) __lowerCAmelCase : str = start_token.group() if end_token is None: __lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' ) else: __lowerCAmelCase : Optional[Any] = end_token.group() __lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE ) if content is not None: __lowerCAmelCase : List[str] = content.group(1 ).strip() if r"<s_" in content and r"</s_" in content: # non-leaf node __lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE ) if value: if len(_SCREAMING_SNAKE_CASE ) == 1: __lowerCAmelCase : Tuple = value[0] __lowerCAmelCase : Tuple = value else: # leaf nodes __lowerCAmelCase : Any = [] for leaf in content.split(R'<sep/>' ): __lowerCAmelCase : List[Any] = leaf.strip() if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>": __lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens output[key].append(_SCREAMING_SNAKE_CASE ) if len(output[key] ) == 1: __lowerCAmelCase : str = output[key][0] __lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip() if tokens[:6] == r"<sep/>": # non-leaf nodes return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ): return [output] if is_inner_value else output else: return [] if is_inner_value else {"text_sequence": tokens} @property def __lowerCamelCase ( self ): warnings.warn( '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , ) return self.image_processor_class @property def __lowerCamelCase ( self ): warnings.warn( '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , ) return self.image_processor
86
1
"""simple docstring""" from itertools import product def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Any = sides_number __lowerCAmelCase : Tuple = max_face_number * dice_number __lowerCAmelCase : List[Any] = [0] * (max_total + 1) __lowerCAmelCase : int = 1 __lowerCAmelCase : Optional[Any] = range(_UpperCamelCase , max_face_number + 1 ) for dice_numbers in product(_UpperCamelCase , repeat=_UpperCamelCase ): __lowerCAmelCase : int = sum(_UpperCamelCase ) totals_frequencies[total] += 1 return totals_frequencies def __lowerCAmelCase (): __lowerCAmelCase : int = total_frequency_distribution( sides_number=4 , dice_number=9 ) __lowerCAmelCase : str = total_frequency_distribution( sides_number=6 , dice_number=6 ) __lowerCAmelCase : Optional[int] = 0 __lowerCAmelCase : List[Any] = 9 __lowerCAmelCase : Tuple = 4 * 9 __lowerCAmelCase : Union[str, Any] = 6 for peter_total in range(_UpperCamelCase , max_peter_total + 1 ): peter_wins_count += peter_totals_frequencies[peter_total] * sum( colin_totals_frequencies[min_colin_total:peter_total] ) __lowerCAmelCase : Tuple = (4**9) * (6**6) __lowerCAmelCase : Union[str, Any] = peter_wins_count / total_games_number __lowerCAmelCase : Dict = round(_UpperCamelCase , ndigits=7 ) return rounded_peter_win_probability if __name__ == "__main__": print(f'{solution() = }')
86
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Tuple = 0 while num > 0: digit_sum += num % 10 num //= 10 return digit_sum def __lowerCAmelCase (_UpperCamelCase = 100 ): __lowerCAmelCase : Optional[int] = 1 __lowerCAmelCase : Optional[Any] = 2 for i in range(2 , max_n + 1 ): __lowerCAmelCase : Any = pre_numerator __lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1 __lowerCAmelCase : int = cur_numerator __lowerCAmelCase : Dict = e_cont * pre_numerator + temp return sum_digits(_UpperCamelCase ) if __name__ == "__main__": print(f'{solution() = }')
86
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class A__ ( _lowerCamelCase): A_ : Any = 'naver-clova-ix/donut-base-finetuned-docvqa' A_ : List[Any] = ( 'This is a tool that answers a question about an document (pdf). It takes an input named `document` which ' 'should be the document containing the information, as well as a `question` that is the question about the ' 'document. It returns a text that contains the answer to the question.' ) A_ : Union[str, Any] = 'document_qa' A_ : List[Any] = AutoProcessor A_ : Union[str, Any] = VisionEncoderDecoderModel A_ : int = ['image', 'text'] A_ : Union[str, Any] = ['text'] def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): if not is_vision_available(): raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' ) super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[Any] = '<s_docvqa><s_question>{user_input}</s_question><s_answer>' __lowerCAmelCase : Dict = task_prompt.replace('{user_input}' , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = self.pre_processor.tokenizer( _SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids __lowerCAmelCase : List[Any] = self.pre_processor(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): return self.model.generate( inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_SCREAMING_SNAKE_CASE , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_SCREAMING_SNAKE_CASE , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_SCREAMING_SNAKE_CASE , ).sequences def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[Any] = self.pre_processor.batch_decode(_SCREAMING_SNAKE_CASE )[0] __lowerCAmelCase : Any = sequence.replace(self.pre_processor.tokenizer.eos_token , '' ) __lowerCAmelCase : List[str] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' ) __lowerCAmelCase : Optional[Any] = re.sub(R'<.*?>' , '' , _SCREAMING_SNAKE_CASE , count=1 ).strip() # remove first task start token __lowerCAmelCase : List[str] = self.pre_processor.tokenajson(_SCREAMING_SNAKE_CASE ) return sequence["answer"]
86
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""", """microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""", } class A__ ( _lowerCamelCase): A_ : List[Any] = 'markuplm' def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): super().__init__( pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = vocab_size __lowerCAmelCase : Any = hidden_size __lowerCAmelCase : List[Any] = num_hidden_layers __lowerCAmelCase : Tuple = num_attention_heads __lowerCAmelCase : Union[str, Any] = hidden_act __lowerCAmelCase : List[Any] = intermediate_size __lowerCAmelCase : List[str] = hidden_dropout_prob __lowerCAmelCase : List[str] = attention_probs_dropout_prob __lowerCAmelCase : Optional[int] = max_position_embeddings __lowerCAmelCase : int = type_vocab_size __lowerCAmelCase : Tuple = initializer_range __lowerCAmelCase : int = layer_norm_eps __lowerCAmelCase : List[str] = position_embedding_type __lowerCAmelCase : List[Any] = use_cache __lowerCAmelCase : Optional[Any] = classifier_dropout # additional properties __lowerCAmelCase : Optional[int] = max_depth __lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings __lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings __lowerCAmelCase : Any = tag_pad_id __lowerCAmelCase : Union[str, Any] = subs_pad_id __lowerCAmelCase : int = xpath_unit_hidden_size
86
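A quick usage sketch for the config above, assuming a transformers release that ships MarkupLM. Configs instantiate offline, and the XPath-specific defaults follow the signature (max_depth defaults to 50, xpath_unit_hidden_size to 32).

from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=64)  # override one XPath field, keep the rest at defaults
print(config.max_depth, config.xpath_unit_hidden_size)  # 64 32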
1
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class A__ ( _lowerCamelCase): A_ : Union[str, Any] = ['image_processor', 'tokenizer'] A_ : List[str] = 'BlipImageProcessor' A_ : List[str] = 'AutoTokenizer' def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = False super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self.image_processor def __call__( self , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 0 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ): if images is None and text is None: raise ValueError('You have to specify either images or text.' ) # Get only text if images is None: __lowerCAmelCase : int = self.tokenizer __lowerCAmelCase : Tuple = self.tokenizer( text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) return text_encoding # add pixel_values __lowerCAmelCase : str = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) if text is not None: __lowerCAmelCase : Dict = self.tokenizer( text=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , stride=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_overflowing_tokens=_SCREAMING_SNAKE_CASE , return_special_tokens_mask=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_length=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) else: __lowerCAmelCase : Any = None if text_encoding is not None: encoding_image_processor.update(_SCREAMING_SNAKE_CASE ) return encoding_image_processor def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) @property # Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names def __lowerCamelCase ( self ): __lowerCAmelCase : str = 
self.tokenizer.model_input_names __lowerCAmelCase : int = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
86
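A minimal stand-in for the __call__ flow of the processor above: image features and text features are encoded separately, then merged into a single encoding dict. The values below are placeholders, not real model inputs.

image_encoding = {"pixel_values": [[0.1, 0.2, 0.3]]}  # placeholder image-processor output
text_encoding = {"input_ids": [[101, 2023, 102]]}     # placeholder tokenizer output
image_encoding.update(text_encoding)                  # mirrors encoding_image_processor.update(...)
print(sorted(image_encoding))  # ['input_ids', 'pixel_values']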
"""simple docstring""" import argparse import torch # Step 1. clone https://github.com/microsoft/unilm # Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd # Step 3. cd unilm # Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink # import classes from unilm.wavlm.WavLM import WavLM as WavLMOrig from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig from transformers import WavLMConfig, WavLMModel, logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """post_extract_proj""": """feature_projection.projection""", """encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""", """self_attn.k_proj""": """encoder.layers.*.attention.k_proj""", """self_attn.v_proj""": """encoder.layers.*.attention.v_proj""", """self_attn.q_proj""": """encoder.layers.*.attention.q_proj""", """self_attn.out_proj""": """encoder.layers.*.attention.out_proj""", """self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""", """self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""", """self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""", """self_attn_layer_norm""": """encoder.layers.*.layer_norm""", """fc1""": """encoder.layers.*.feed_forward.intermediate_dense""", """fc2""": """encoder.layers.*.feed_forward.output_dense""", """final_layer_norm""": """encoder.layers.*.final_layer_norm""", """encoder.layer_norm""": """encoder.layer_norm""", """w2v_model.layer_norm""": """feature_projection.layer_norm""", """quantizer.weight_proj""": """quantizer.weight_proj""", """quantizer.vars""": """quantizer.codevectors""", """project_q""": """project_q""", """final_proj""": """project_hid""", """w2v_encoder.proj""": """ctc_proj""", """mask_emb""": """masked_spec_embed""", } lowerCamelCase__ = [ """ctc_proj""", """quantizer.weight_proj""", """quantizer.codevectors""", """project_q""", """project_hid""", ] def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): for attribute in key.split('.' ): __lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase ) if weight_type is not None: __lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape else: __lowerCAmelCase : Dict = hf_pointer.shape assert hf_shape == value.shape, ( F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" F" {value.shape} for {full_name}" ) if weight_type == "weight": __lowerCAmelCase : Union[str, Any] = value elif weight_type == "weight_g": __lowerCAmelCase : List[Any] = value elif weight_type == "weight_v": __lowerCAmelCase : Any = value elif weight_type == "bias": __lowerCAmelCase : List[str] = value else: __lowerCAmelCase : List[Any] = value logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Any = [] __lowerCAmelCase : Optional[int] = fairseq_model.state_dict() __lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowerCAmelCase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , ) __lowerCAmelCase : str = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' 
)[0]: __lowerCAmelCase : int = True if "*" in mapped_key: __lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2] __lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase ) if "weight_g" in name: __lowerCAmelCase : Union[str, Any] = 'weight_g' elif "weight_v" in name: __lowerCAmelCase : int = 'weight_v' elif "bias" in name and "relative_attention_bias" not in name: __lowerCAmelCase : Optional[Any] = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __lowerCAmelCase : List[str] = 'weight' else: __lowerCAmelCase : Optional[Any] = None set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(F"Unused weights: {unused_weights}" ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1] __lowerCAmelCase : Any = name.split('.' ) __lowerCAmelCase : List[Any] = int(items[0] ) __lowerCAmelCase : Tuple = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) __lowerCAmelCase : Tuple = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) __lowerCAmelCase : int = value logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) __lowerCAmelCase : Optional[Any] = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F"{full_name} has size {value.shape}, but" F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." ) __lowerCAmelCase : Any = value logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." 
) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ): # load the pre-trained checkpoints __lowerCAmelCase : Any = torch.load(_UpperCamelCase ) __lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] ) __lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase ) model.load_state_dict(checkpoint['model'] ) model.eval() if config_path is not None: __lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase ) else: __lowerCAmelCase : List[str] = WavLMConfig() __lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase ) recursively_load_weights(_UpperCamelCase , _UpperCamelCase ) hf_wavlm.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") lowerCamelCase__ = parser.parse_args() convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
86
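A minimal sketch of the "*"-wildcard key mapping the WavLM conversion above relies on: the fairseq layer index is recovered from the original parameter name and spliced into the Hugging Face key. MAPPING_DEMO is a made-up two-entry subset of the full table.

from typing import Optional

MAPPING_DEMO = {
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
}

def map_key(name: str) -> Optional[str]:
    for key, mapped_key in MAPPING_DEMO.items():
        if key in name:
            if "*" in mapped_key:
                layer_index = name.split(key)[0].split(".")[-2]  # e.g. "3" from "encoder.layers.3."
                mapped_key = mapped_key.replace("*", layer_index)
            return mapped_key
    return None

print(map_key("encoder.layers.3.fc1.weight"))  # encoder.layers.3.feed_forward.intermediate_dense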
1
"""simple docstring""" import collections import json import math import os import re import time from fnmatch import fnmatch from typing import Dict import requests from slack_sdk import WebClient lowerCamelCase__ = WebClient(token=os.environ["""CI_SLACK_BOT_TOKEN"""]) def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : List[str] = test_results.split(' ' ) __lowerCAmelCase : Tuple = 0 __lowerCAmelCase : List[Any] = 0 # When the output is short enough, the output is surrounded by = signs: "== OUTPUT ==" # When it is too long, those signs are not present. __lowerCAmelCase : int = expressions[-2] if '=' in expressions[-1] else expressions[-1] for i, expression in enumerate(_UpperCamelCase ): if "failed" in expression: failed += int(expressions[i - 1] ) if "passed" in expression: success += int(expressions[i - 1] ) return failed, success, time_spent def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Optional[int] = {} __lowerCAmelCase : int = None __lowerCAmelCase : List[Any] = False for line in failures_short_lines.split('\n' ): if re.search(r'_ \[doctest\]' , _UpperCamelCase ): __lowerCAmelCase : List[str] = True __lowerCAmelCase : Union[str, Any] = line.split(' ' )[2] elif in_error and not line.split(' ' )[0].isdigit(): __lowerCAmelCase : Union[str, Any] = line __lowerCAmelCase : Any = False return failures class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = title __lowerCAmelCase : List[Any] = doc_test_results['time_spent'].split(',' )[0] __lowerCAmelCase : Optional[int] = doc_test_results['success'] __lowerCAmelCase : Dict = doc_test_results['failures'] __lowerCAmelCase : Tuple = self.n_success + self.n_failures # Failures and success of the modeling tests __lowerCAmelCase : Optional[int] = doc_test_results @property def __lowerCamelCase ( self ): __lowerCAmelCase : Union[str, Any] = [self._time_spent] __lowerCAmelCase : int = 0 for time in time_spent: __lowerCAmelCase : Tuple = time.split(':' ) # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute. if len(_SCREAMING_SNAKE_CASE ) == 1: __lowerCAmelCase : Dict = [0, 0, time_parts[0]] __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] ) total_secs += hours * 36_00 + minutes * 60 + seconds __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60 return f"{int(_SCREAMING_SNAKE_CASE )}h{int(_SCREAMING_SNAKE_CASE )}m{int(_SCREAMING_SNAKE_CASE )}s" @property def __lowerCamelCase ( self ): return {"type": "header", "text": {"type": "plain_text", "text": self.title}} @property def __lowerCamelCase ( self ): return { "type": "section", "text": { "type": "plain_text", "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.", "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def __lowerCamelCase ( self ): return { "type": "section", "text": { "type": "plain_text", "text": ( f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in" f" {self.time}." 
), "emoji": True, }, "accessory": { "type": "button", "text": {"type": "plain_text", "text": "Check Action results", "emoji": True}, "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } @property def __lowerCamelCase ( self ): __lowerCAmelCase : Any = 40 __lowerCAmelCase : int = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )} __lowerCAmelCase : Any = '' for category, failures in category_failures.items(): if len(_SCREAMING_SNAKE_CASE ) == 0: continue if report != "": report += "\n\n" report += f"*{category} failures*:".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n" report += "`" report += "`\n`".join(_SCREAMING_SNAKE_CASE ) report += "`" return { "type": "section", "text": { "type": "mrkdwn", "text": f"The following examples had failures:\n\n\n{report}\n", }, } @property def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = [self.header] if self.n_failures > 0: blocks.append(self.failures ) if self.n_failures > 0: blocks.extend([self.category_failures] ) if self.n_failures == 0: blocks.append(self.no_failures ) return json.dumps(_SCREAMING_SNAKE_CASE ) @staticmethod def __lowerCamelCase ( ): __lowerCAmelCase : Dict = [ { 'type': 'section', 'text': { 'type': 'plain_text', 'text': 'There was an issue running the tests.', }, 'accessory': { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True}, 'url': f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}", }, } ] print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(_SCREAMING_SNAKE_CASE )} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text='There was an issue running the tests.' , blocks=_SCREAMING_SNAKE_CASE , ) def __lowerCamelCase ( self ): print('Sending the following payload' ) print(json.dumps({'blocks': json.loads(self.payload )} ) ) __lowerCAmelCase : Any = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else 'All tests passed.' __lowerCAmelCase : Optional[Any] = client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , blocks=self.payload , text=_SCREAMING_SNAKE_CASE , ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = '' for key, value in failures.items(): __lowerCAmelCase : str = value[:2_00] + ' [Truncated]' if len(_SCREAMING_SNAKE_CASE ) > 2_50 else value failures_text += f"*{key}*\n_{value}_\n\n" __lowerCAmelCase : int = job_name __lowerCAmelCase : str = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}} if job_link is not None: __lowerCAmelCase : int = { 'type': 'button', 'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True}, 'url': job_link, } return [ {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}}, content, {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}}, ] def __lowerCamelCase ( self ): if self.thread_ts is None: raise ValueError('Can only post reply if a post has been made.' 
) __lowerCAmelCase : int = self.doc_test_results.pop('job_link' ) self.doc_test_results.pop('failures' ) self.doc_test_results.pop('success' ) self.doc_test_results.pop('time_spent' ) __lowerCAmelCase : Union[str, Any] = sorted(self.doc_test_results.items() , key=lambda _SCREAMING_SNAKE_CASE : t[0] ) for job, job_result in sorted_dict: if len(job_result['failures'] ): __lowerCAmelCase : List[Any] = f"*Num failures* :{len(job_result['failed'] )} \n" __lowerCAmelCase : Optional[int] = job_result['failures'] __lowerCAmelCase : Dict = self.get_reply_blocks(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , text=_SCREAMING_SNAKE_CASE ) print('Sending the following reply' ) print(json.dumps({'blocks': blocks} ) ) client.chat_postMessage( channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'] , text=f"Results for {job}" , blocks=_SCREAMING_SNAKE_CASE , thread_ts=self.thread_ts['ts'] , ) time.sleep(1 ) def __lowerCAmelCase (): __lowerCAmelCase : int = os.environ['GITHUB_RUN_ID'] __lowerCAmelCase : Optional[int] = F"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100" __lowerCAmelCase : int = requests.get(_UpperCamelCase ).json() __lowerCAmelCase : int = {} try: jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) __lowerCAmelCase : Optional[int] = math.ceil((result['total_count'] - 100) / 100 ) for i in range(_UpperCamelCase ): __lowerCAmelCase : int = requests.get(url + F"&page={i + 2}" ).json() jobs.update({job['name']: job['html_url'] for job in result['jobs']} ) return jobs except Exception as e: print('Unknown error, could not fetch links.' , _UpperCamelCase ) return {} def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : List[str] = {} if os.path.exists(_UpperCamelCase ): __lowerCAmelCase : Any = os.listdir(_UpperCamelCase ) for file in files: try: with open(os.path.join(_UpperCamelCase , _UpperCamelCase ) , encoding='utf-8' ) as f: __lowerCAmelCase : List[str] = f.read() except UnicodeDecodeError as e: raise ValueError(F"Could not open {os.path.join(_UpperCamelCase , _UpperCamelCase )}." 
) from e return _artifact def __lowerCAmelCase (): class A__ : def __init__( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = name __lowerCAmelCase : str = [] def __str__( self ): return self.name def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): self.paths.append({'name': self.name, 'path': path} ) __lowerCAmelCase : Dict[str, Artifact] = {} __lowerCAmelCase : Optional[Any] = filter(os.path.isdir , os.listdir() ) for directory in directories: __lowerCAmelCase : Optional[int] = directory if artifact_name not in _available_artifacts: __lowerCAmelCase : Union[str, Any] = Artifact(_UpperCamelCase ) _available_artifacts[artifact_name].add_path(_UpperCamelCase ) return _available_artifacts if __name__ == "__main__": lowerCamelCase__ = get_job_links() lowerCamelCase__ = retrieve_available_artifacts() lowerCamelCase__ = collections.OrderedDict( [ ("""*.py""", """API Examples"""), ("""*.md""", """MD Examples"""), ] ) # This dict will contain all the information relative to each doc test category: # - failed: list of failed tests # - failures: dict in the format 'test': 'error_message' lowerCamelCase__ = { v: { """failed""": [], """failures""": {}, } for v in docs.values() } # Link to the GitHub Action job lowerCamelCase__ = github_actions_job_links.get("""run_doctests""") lowerCamelCase__ = available_artifacts["""doc_tests_gpu_test_reports"""].paths[0] lowerCamelCase__ = retrieve_artifact(artifact_path["""name"""]) if "stats" in artifact: lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = handle_test_results(artifact["""stats"""]) lowerCamelCase__ = failed lowerCamelCase__ = success lowerCamelCase__ = time_spent[1:-1] + """, """ lowerCamelCase__ = extract_first_line_failure(artifact["""failures_short"""]) for line in artifact["summary_short"].split("""\n"""): if re.search("""FAILED""", line): lowerCamelCase__ = line.replace("""FAILED """, """""") lowerCamelCase__ = line.split()[0].replace("""\n""", """""") if "::" in line: lowerCamelCase__ , lowerCamelCase__ = line.split("""::""") else: lowerCamelCase__ , lowerCamelCase__ = line, line for file_regex in docs.keys(): if fnmatch(file_path, file_regex): lowerCamelCase__ = docs[file_regex] doc_test_results[category]["failed"].append(test) lowerCamelCase__ = all_failures[test] if test in all_failures else """N/A""" lowerCamelCase__ = failure break lowerCamelCase__ = Message("""🤗 Results of the doc tests.""", doc_test_results) message.post() message.post_reply()
86
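A standalone re-implementation, as an interpretation of the obfuscated code rather than a copy, of the time accounting in the Message class above: "h:m:s" spans are summed into seconds and re-rendered as "XhYmZs".

def total_time(spans):
    total_secs = 0
    for span in spans:
        parts = span.split(":")
        if len(parts) == 1:               # bare seconds, e.g. "42.5"
            parts = ["0", "0", parts[0]]
        hours, minutes, seconds = int(parts[0]), int(parts[1]), float(parts[2])
        total_secs += hours * 3600 + minutes * 60 + seconds
    hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
    return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"

print(total_time(["0:12:30", "1:05:00"]))  # 1h17m30s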
"""simple docstring""" import os import pytest from attr import dataclass lowerCamelCase__ = """us-east-1""" # defaults region @dataclass class A__ : A_ : str A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' A_ : Optional[int] = { 'task_name': 'mnli', 'per_device_train_batch_size': 1_6, 'per_device_eval_batch_size': 1_6, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 5_0_0, 'save_steps': 5_5_0_0, } A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0} @property def __lowerCamelCase ( self ): if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowerCamelCase ( self ): return f"{self.framework}-transfromers-test" @property def __lowerCamelCase ( self ): return f"./tests/sagemaker/scripts/{self.framework}" @property def __lowerCamelCase ( self ): if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework )
86
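A quick check of the metric regexes above against a made-up trainer log line: each pattern captures the numeric value that follows the metric name.

import re

line = "train_runtime = 123.45"  # hypothetical log line
match = re.search(r"train_runtime.*=\D*(.*?)$", line)
print(match.group(1))  # 123.45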
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase__ = { """configuration_xlm_roberta""": [ """XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLMRobertaConfig""", """XLMRobertaOnnxConfig""", ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""XLMRobertaTokenizer"""] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ["""XLMRobertaTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """XLMRobertaForCausalLM""", """XLMRobertaForMaskedLM""", """XLMRobertaForMultipleChoice""", """XLMRobertaForQuestionAnswering""", """XLMRobertaForSequenceClassification""", """XLMRobertaForTokenClassification""", """XLMRobertaModel""", """XLMRobertaPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFXLMRobertaForCausalLM""", """TFXLMRobertaForMaskedLM""", """TFXLMRobertaForMultipleChoice""", """TFXLMRobertaForQuestionAnswering""", """TFXLMRobertaForSequenceClassification""", """TFXLMRobertaForTokenClassification""", """TFXLMRobertaModel""", """TFXLMRobertaPreTrainedModel""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ """FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""", """FlaxXLMRobertaForMaskedLM""", """FlaxXLMRobertaForCausalLM""", """FlaxXLMRobertaForMultipleChoice""", """FlaxXLMRobertaForQuestionAnswering""", """FlaxXLMRobertaForSequenceClassification""", """FlaxXLMRobertaForTokenClassification""", """FlaxXLMRobertaModel""", """FlaxXLMRobertaPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, 
TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
86
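A minimal sketch of the lazy-import pattern behind the _LazyModule used in the XLM-RoBERTa __init__ above: names resolve to real modules only on first attribute access. This toy version maps attribute names onto standard-library modules.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):  # only called when normal lookup fails
        for module, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(module), attr)
        raise AttributeError(attr)

demo = LazyModule("demo", {"json": ["dumps"], "math": ["sqrt"]})
print(demo.dumps({"a": 1}), demo.sqrt(9.0))  # {"a": 1} 3.0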
"""simple docstring""" from __future__ import annotations lowerCamelCase__ = list[tuple[int, int]] lowerCamelCase__ = [ [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles [0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0], [1, 0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0], ] lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = pos_x __lowerCAmelCase : Optional[Any] = pos_y __lowerCAmelCase : Optional[int] = (pos_y, pos_x) __lowerCAmelCase : Union[str, Any] = goal_x __lowerCAmelCase : Any = goal_y __lowerCAmelCase : Optional[Any] = g_cost __lowerCAmelCase : Any = parent __lowerCAmelCase : Union[str, Any] = self.calculate_heuristic() def __lowerCamelCase ( self ): __lowerCAmelCase : str = abs(self.pos_x - self.goal_x ) __lowerCAmelCase : str = abs(self.pos_y - self.goal_y ) return dx + dy def __lt__( self , _SCREAMING_SNAKE_CASE ): return self.f_cost < other.f_cost class A__ : def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = [self.start] __lowerCAmelCase : list[Node] = [] __lowerCAmelCase : str = False def __lowerCamelCase ( self ): while self.open_nodes: # Open Nodes are sorted using __lt__ self.open_nodes.sort() __lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 ) if current_node.pos == self.target.pos: __lowerCAmelCase : Union[str, Any] = True return self.retrace_path(_SCREAMING_SNAKE_CASE ) self.closed_nodes.append(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE ) for child_node in successors: if child_node in self.closed_nodes: continue if child_node not in self.open_nodes: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: # retrieve the best current path __lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) ) if child_node.g_cost < better_node.g_cost: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) else: self.open_nodes.append(_SCREAMING_SNAKE_CASE ) if not self.reached: return [self.start.pos] return None def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = [] for action in delta: __lowerCAmelCase : Optional[int] = parent.pos_x + action[1] __lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0] if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1): continue if grid[pos_y][pos_x] != 0: continue successors.append( Node( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) ) return successors def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = node __lowerCAmelCase : Optional[int] = [] while current_node is not None: path.append((current_node.pos_y, current_node.pos_x) ) __lowerCAmelCase : int = current_node.parent path.reverse() return path if __name__ == "__main__": lowerCamelCase__ = (0, 0) lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1) for elem in grid: print(elem) print("""------""") lowerCamelCase__ = GreedyBestFirst(init, goal) lowerCamelCase__ = 
greedy_bf.search() if path: for pos_x, pos_y in path: lowerCamelCase__ = 2 for elem in grid: print(elem)
86
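A minimal sketch of the node ordering in the greedy best-first search above: the open list is sorted purely by the Manhattan-distance heuristic to the goal, so the node that looks closest is expanded first.

def manhattan(pos, goal):
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

goal = (6, 6)
open_nodes = [(0, 3), (5, 6), (2, 2)]
open_nodes.sort(key=lambda pos: manhattan(pos, goal))
print(open_nodes[0])  # (5, 6), heuristic 1, expanded first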
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from diffusers import ( DDIMScheduler, KandinskyVaaInpaintPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Any = KandinskyVaaInpaintPipeline A_ : Tuple = ['image_embeds', 'negative_image_embeds', 'image', 'mask_image'] A_ : List[str] = [ 'image_embeds', 'negative_image_embeds', 'image', 'mask_image', ] A_ : List[str] = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] A_ : Tuple = False @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return 32 @property def __lowerCamelCase ( self ): return self.time_input_dim @property def __lowerCamelCase ( self ): return self.time_input_dim * 4 @property def __lowerCamelCase ( self ): return 1_00 @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = { 'in_channels': 9, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } __lowerCAmelCase : Optional[Any] = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE ) return model @property def __lowerCamelCase ( self ): return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : str = VQModel(**self.dummy_movq_kwargs ) return model def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = self.dummy_unet __lowerCAmelCase : List[str] = self.dummy_movq __lowerCAmelCase : Union[str, Any] = DDIMScheduler( num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='epsilon' , thresholding=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : str = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ): __lowerCAmelCase : List[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , 
rng=random.Random(seed + 1 ) ).to( _SCREAMING_SNAKE_CASE ) # create init_image __lowerCAmelCase : Tuple = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] __lowerCAmelCase : List[Any] = Image.fromarray(np.uinta(_SCREAMING_SNAKE_CASE ) ).convert('RGB' ).resize((2_56, 2_56) ) # create mask __lowerCAmelCase : Optional[Any] = np.ones((64, 64) , dtype=np.floataa ) __lowerCAmelCase : Dict = 0 if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : Dict = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : Optional[int] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = { 'image': init_image, 'mask_image': mask, 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'generator': generator, 'height': 64, 'width': 64, 'num_inference_steps': 2, 'guidance_scale': 4.0, 'output_type': 'np', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = 'cpu' __lowerCAmelCase : Union[str, Any] = self.get_dummy_components() __lowerCAmelCase : List[Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) ) __lowerCAmelCase : Union[str, Any] = output.images __lowerCAmelCase : Optional[Any] = pipe( **self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0] __lowerCAmelCase : str = image[0, -3:, -3:, -1] __lowerCAmelCase : int = image_from_tuple[0, -3:, -3:, -1] print(f"image.shape {image.shape}" ) assert image.shape == (1, 64, 64, 3) __lowerCAmelCase : List[Any] = np.array( [0.5077_5903, 0.4952_7195, 0.4882_4543, 0.5019_2237, 0.4864_4906, 0.4937_3814, 0.478_0598, 0.4723_4827, 0.4832_7848] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}" assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}" def __lowerCamelCase ( self ): super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy' ) __lowerCAmelCase : Optional[int] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' ) __lowerCAmelCase : Union[str, Any] = np.ones((7_68, 7_68) , dtype=np.floataa ) __lowerCAmelCase : List[str] = 0 __lowerCAmelCase : Dict = 'a hat' __lowerCAmelCase : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa ) pipe_prior.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = KandinskyVaaInpaintPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-decoder-inpaint' , torch_dtype=torch.floataa ) __lowerCAmelCase : str = pipeline.to(_SCREAMING_SNAKE_CASE ) pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) 
__lowerCAmelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = pipe_prior( _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='' , ).to_tuple() __lowerCAmelCase : int = pipeline( image=_SCREAMING_SNAKE_CASE , mask_image=_SCREAMING_SNAKE_CASE , image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type='np' , ) __lowerCAmelCase : Optional[int] = output.images[0] assert image.shape == (7_68, 7_68, 3) assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
86
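A small sketch of the inpainting mask the Kandinsky test above constructs: a 64x64 float array of ones with a zeroed block. The zeroed region below is hypothetical, and which value ends up repainted depends on the mask convention of the pipeline version.

import numpy as np

mask = np.ones((64, 64), dtype=np.float32)
mask[:32, :32] = 0      # hypothetical region; repaint-vs-keep convention follows the pipeline version
print(int(mask.sum()))  # 3072 of 4096 pixels left at 1.0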
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float(moles / volume ) * nfactor ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((moles * 0.0821 * temperature) / (volume) ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
86
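A quick numeric check of the helpers above, taking R = 0.0821 L·atm/(mol·K) as they do: for 1 mol at 300 K and 1 atm, V = nRT/P ≈ 24.63 L, which the round() call turns into 25.

moles, temperature, pressure = 1, 300, 1
volume = round(float((moles * 0.0821 * temperature) / pressure))
print(volume)  # 25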
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SwiftFormerConfig, SwiftFormerForImageClassification, ViTImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = torch.device("""cpu""") def __lowerCAmelCase (): __lowerCAmelCase : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg' __lowerCAmelCase : Dict = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def __lowerCAmelCase (_UpperCamelCase ): if swiftformer_name == "swiftformer_xs": return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] ) elif swiftformer_name == "swiftformer_s": return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] ) elif swiftformer_name == "swiftformer_l1": return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] ) elif swiftformer_name == "swiftformer_l3": return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Tuple = dct.pop(_UpperCamelCase ) __lowerCAmelCase : Dict = val def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Optional[Any] = [] for k in state_dict.keys(): __lowerCAmelCase : str = k if ".pwconv" in k: __lowerCAmelCase : List[str] = k_new.replace('.pwconv' , '.point_wise_conv' ) if ".dwconv" in k: __lowerCAmelCase : Tuple = k_new.replace('.dwconv' , '.depth_wise_conv' ) if ".Proj." in k: __lowerCAmelCase : Optional[int] = k_new.replace('.Proj.' , '.proj.' ) if "patch_embed" in k_new: __lowerCAmelCase : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' ) if "network" in k_new: __lowerCAmelCase : Optional[Any] = k_new.split('.' ) if ls[2].isdigit(): __lowerCAmelCase : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' 
+ '.'.join(ls[3:] ) else: __lowerCAmelCase : int = k_new.replace('network' , 'swiftformer.encoder.network' ) rename_keys.append((k, k_new) ) return rename_keys @torch.no_grad() def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Tuple = SwiftFormerConfig() # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size __lowerCAmelCase : Tuple = 1000 __lowerCAmelCase : Tuple = 'huggingface/label-files' __lowerCAmelCase : Any = 'imagenet-1k-id2label.json' __lowerCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) ) __lowerCAmelCase : Optional[Any] = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __lowerCAmelCase : Optional[Any] = idalabel __lowerCAmelCase : Any = {v: k for k, v in idalabel.items()} # size of the architecture if swiftformer_name == "swiftformer_xs": __lowerCAmelCase : Optional[int] = [3, 3, 6, 4] __lowerCAmelCase : Union[str, Any] = [48, 56, 112, 220] elif swiftformer_name == "swiftformer_s": __lowerCAmelCase : str = [3, 3, 9, 6] __lowerCAmelCase : Tuple = [48, 64, 168, 224] elif swiftformer_name == "swiftformer_l1": __lowerCAmelCase : int = [4, 3, 10, 5] __lowerCAmelCase : List[Any] = [48, 96, 192, 384] elif swiftformer_name == "swiftformer_l3": __lowerCAmelCase : Union[str, Any] = [4, 4, 12, 6] __lowerCAmelCase : List[str] = [64, 128, 320, 512] # load state_dict of original model, remove and rename some keys if original_ckpt: if original_ckpt.startswith('https' ): __lowerCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='cpu' , check_hash=_UpperCamelCase ) else: __lowerCAmelCase : Optional[int] = torch.load(_UpperCamelCase , map_location='cpu' ) __lowerCAmelCase : List[Any] = checkpoint __lowerCAmelCase : Optional[int] = create_rename_keys(_UpperCamelCase ) for rename_key_src, rename_key_dest in rename_keys: rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # load HuggingFace model __lowerCAmelCase : Optional[Any] = SwiftFormerForImageClassification(_UpperCamelCase ).eval() hf_model.load_state_dict(_UpperCamelCase ) # prepare test inputs __lowerCAmelCase : Optional[Any] = prepare_img() __lowerCAmelCase : Optional[Any] = ViTImageProcessor.from_pretrained('preprocessor_config' ) __lowerCAmelCase : Dict = processor(images=_UpperCamelCase , return_tensors='pt' ) # compare outputs from both models __lowerCAmelCase : int = get_expected_output(_UpperCamelCase ) __lowerCAmelCase : Tuple = hf_model(inputs['pixel_values'] ).logits assert hf_logits.shape == torch.Size([1, 1000] ) assert torch.allclose(hf_logits[0, 0:5] , _UpperCamelCase , atol=1e-3 ) Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase ) print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" ) hf_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = argparse.ArgumentParser() # Required parameters parser.add_argument( """--swiftformer_name""", default="""swiftformer_xs""", choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""], type=str, help="""Name of the SwiftFormer model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default="""./converted_outputs/""", type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""") lowerCamelCase__ = parser.parse_args() 
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
86
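A minimal sketch of the rename pass in the SwiftFormer conversion above: plain substring rewrites map original checkpoint keys onto Hugging Face keys before the state dict is rebuilt.

def rename(k: str) -> str:
    k_new = k
    if ".pwconv" in k:
        k_new = k_new.replace(".pwconv", ".point_wise_conv")
    if ".dwconv" in k:
        k_new = k_new.replace(".dwconv", ".depth_wise_conv")
    if "patch_embed" in k_new:
        k_new = k_new.replace("patch_embed", "swiftformer.patch_embed.patch_embedding")
    return k_new

print(rename("network.0.1.pwconv1.weight"))  # network.0.1.point_wise_conv1.weight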
"""simple docstring""" import enum import warnings from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING from ..utils import add_end_docstrings, is_tf_available from .base import PIPELINE_INIT_ARGS, Pipeline if is_tf_available(): import tensorflow as tf class A__ ( enum.Enum): A_ : List[Any] = 0 A_ : Dict = 1 A_ : Union[str, Any] = 2 @add_end_docstrings(_lowerCamelCase) class A__ ( _lowerCamelCase): A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n ' def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) self.check_model_type( TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING ) if "prefix" not in self._preprocess_params: # This is very specific. The logic is quite complex and needs to be done # as a "default". # It also defines both some preprocess_kwargs and generate_kwargs # which is why we cannot put them in their respective methods. __lowerCAmelCase : Any = None if self.model.config.prefix is not None: __lowerCAmelCase : str = self.model.config.prefix if prefix is None and self.model.__class__.__name__ in [ "XLNetLMHeadModel", "TransfoXLLMHeadModel", "TFXLNetLMHeadModel", "TFTransfoXLLMHeadModel", ]: # For XLNet and TransformerXL we add an article to the prompt to give more state to the model. __lowerCAmelCase : Tuple = self.XL_PREFIX if prefix is not None: # Recalculate some generate_kwargs linked to prefix. 
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params ) __lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params} __lowerCAmelCase : List[str] = {**self._forward_params, **forward_params} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : Optional[int] = {} if prefix is not None: __lowerCAmelCase : Union[str, Any] = prefix if prefix: __lowerCAmelCase : Dict = self.tokenizer( _SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1] if handle_long_generation is not None: if handle_long_generation not in {"hole"}: raise ValueError( f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected" ' [None, \'hole\']' ) __lowerCAmelCase : int = handle_long_generation preprocess_params.update(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = generate_kwargs __lowerCAmelCase : List[Any] = {} if return_full_text is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_full_text`' ) if return_tensors is not None: raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT if return_tensors is not None and return_type is None: if return_text is not None: raise ValueError('`return_text` is mutually exclusive with `return_tensors`' ) __lowerCAmelCase : List[Any] = ReturnType.TENSORS if return_type is not None: __lowerCAmelCase : Optional[Any] = return_type if clean_up_tokenization_spaces is not None: __lowerCAmelCase : Tuple = clean_up_tokenization_spaces if stop_sequence is not None: __lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) if len(_SCREAMING_SNAKE_CASE ) > 1: warnings.warn( 'Stopping on a multiple token sequence is not yet supported on transformers. The first token of' ' the stop sequence will be used as the stop sequence string in the interim.' 
) __lowerCAmelCase : Optional[Any] = stop_sequence_ids[0] return preprocess_params, forward_params, postprocess_params def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): # Parse arguments if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]: kwargs.update({'add_space_before_punct_symbol': True} ) return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = self.tokenizer( prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework ) __lowerCAmelCase : Optional[Any] = prompt_text if handle_long_generation == "hole": __lowerCAmelCase : str = inputs['input_ids'].shape[-1] if "max_new_tokens" in generate_kwargs: __lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens'] else: __lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len if new_tokens < 0: raise ValueError('We cannot infer how many new tokens are expected' ) if cur_len + new_tokens > self.tokenizer.model_max_length: __lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens if keep_length <= 0: raise ValueError( 'We cannot use `hole` to handle this generation the number of desired tokens exceeds the' ' models max length' ) __lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:] if "attention_mask" in inputs: __lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:] return inputs def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): __lowerCAmelCase : str = model_inputs['input_ids'] __lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE ) # Allow empty prompts if input_ids.shape[1] == 0: __lowerCAmelCase : Dict = None __lowerCAmelCase : str = None __lowerCAmelCase : Tuple = 1 else: __lowerCAmelCase : Any = input_ids.shape[0] __lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' ) # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline. 
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 ) if prefix_length > 0: __lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].max_new_tokens is not None ) if not has_max_new_tokens: __lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length generate_kwargs["max_length"] += prefix_length __lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or ( 'generation_config' in generate_kwargs and generate_kwargs['generation_config'].min_new_tokens is not None ) if not has_min_new_tokens and "min_length" in generate_kwargs: generate_kwargs["min_length"] += prefix_length # BS x SL __lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = generated_sequence.shape[0] if self.framework == "pt": __lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] ) elif self.framework == "tf": __lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) ) return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ): __lowerCAmelCase : Any = model_outputs['generated_sequence'][0] __lowerCAmelCase : Tuple = model_outputs['input_ids'] __lowerCAmelCase : Any = model_outputs['prompt_text'] __lowerCAmelCase : int = generated_sequence.numpy().tolist() __lowerCAmelCase : Union[str, Any] = [] for sequence in generated_sequence: if return_type == ReturnType.TENSORS: __lowerCAmelCase : int = {'generated_token_ids': sequence} elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}: # Decode text __lowerCAmelCase : Any = self.tokenizer.decode( _SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) # Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used if input_ids is None: __lowerCAmelCase : Optional[Any] = 0 else: __lowerCAmelCase : Any = len( self.tokenizer.decode( input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) ) if return_type == ReturnType.FULL_TEXT: __lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:] else: __lowerCAmelCase : int = text[prompt_length:] __lowerCAmelCase : Dict = {'generated_text': all_text} records.append(_SCREAMING_SNAKE_CASE ) return records
86
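A minimal sketch of the return-type handling in the text-generation pipeline above: FULL_TEXT re-attaches the prompt, NEW_TEXT keeps only the freshly generated suffix. The strings below are made up.

import enum

class ReturnType(enum.Enum):
    NEW_TEXT = 1
    FULL_TEXT = 2

def postprocess(decoded: str, prompt: str, return_type: ReturnType) -> str:
    new_text = decoded[len(prompt):]  # strip the decoded prompt prefix
    return prompt + new_text if return_type is ReturnType.FULL_TEXT else new_text

print(repr(postprocess("Hello world, how are", "Hello world,", ReturnType.NEW_TEXT)))  # ' how are'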
1
"""simple docstring""" import os from typing import List, Optional, Union from ...tokenization_utils import PreTrainedTokenizer from ...tokenization_utils_base import AddedToken from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """vocab.txt"""} lowerCamelCase__ = { """vocab_file""": { """facebook/esm2_t6_8M_UR50D""": """https://huggingface.co/facebook/esm2_t6_8M_UR50D/resolve/main/vocab.txt""", """facebook/esm2_t12_35M_UR50D""": """https://huggingface.co/facebook/esm2_t12_35M_UR50D/resolve/main/vocab.txt""", }, } lowerCamelCase__ = { """facebook/esm2_t6_8M_UR50D""": 1_024, """facebook/esm2_t12_35M_UR50D""": 1_024, } def __lowerCAmelCase (_UpperCamelCase ): with open(_UpperCamelCase , 'r' ) as f: __lowerCAmelCase : Dict = f.read().splitlines() return [l.strip() for l in lines] class A__ ( _lowerCamelCase): A_ : str = VOCAB_FILES_NAMES A_ : List[str] = PRETRAINED_VOCAB_FILES_MAP A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[int] = ['input_ids', 'attention_mask'] def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<cls>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE="<eos>" , **_SCREAMING_SNAKE_CASE , ): super().__init__(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = load_vocab_file(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = dict(enumerate(self.all_tokens ) ) __lowerCAmelCase : List[str] = {tok: ind for ind, tok in enumerate(self.all_tokens )} __lowerCAmelCase : List[Any] = unk_token __lowerCAmelCase : int = cls_token __lowerCAmelCase : Dict = pad_token __lowerCAmelCase : List[Any] = mask_token __lowerCAmelCase : int = eos_token __lowerCAmelCase : Union[str, Any] = self.all_tokens self._create_trie(self.unique_no_split_tokens ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): return self._id_to_token.get(_SCREAMING_SNAKE_CASE , self.unk_token ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): return self._token_to_id.get(_SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ): return text.split() def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=False ): return len(self._id_to_token ) def __lowerCamelCase ( self ): return {token: i for i, token in enumerate(self.all_tokens )} def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): return self._token_to_id.get(_SCREAMING_SNAKE_CASE , self._token_to_id.get(self.unk_token ) ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): return self._id_to_token.get(_SCREAMING_SNAKE_CASE , self.unk_token ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): __lowerCAmelCase : List[Any] = [self.cls_token_id] __lowerCAmelCase : Optional[Any] = [self.eos_token_id] # No sep token in ESM vocabulary if token_ids_a is None: if self.eos_token_id is None: return cls + token_ids_a else: return cls + token_ids_a + sep elif self.eos_token_id is None: raise ValueError('Cannot tokenize multiple sequences when EOS token is not set!' 
) return cls + token_ids_a + sep + token_ids_a + sep # Multiple inputs always have an EOS token def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ): if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if token in self.all_special_ids else 0 for token in token_ids_a] __lowerCAmelCase : Any = [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] if token_ids_a is not None: mask += [0] * len(_SCREAMING_SNAKE_CASE ) + [1] return mask def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = os.path.join(_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + 'vocab.txt' ) with open(_SCREAMING_SNAKE_CASE , 'w' ) as f: f.write('\n'.join(self.all_tokens ) ) return (vocab_file,) @property def __lowerCamelCase ( self ): return self.get_vocab_size(with_added_tokens=_SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = False ): return super()._add_tokens(_SCREAMING_SNAKE_CASE , special_tokens=_SCREAMING_SNAKE_CASE )
86
"""simple docstring""" from __future__ import annotations import bisect def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): if hi < 0: __lowerCAmelCase : Tuple = len(_UpperCamelCase ) while lo < hi: __lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: __lowerCAmelCase : int = mid + 1 else: __lowerCAmelCase : List[str] = mid return lo def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): if hi < 0: __lowerCAmelCase : List[Any] = len(_UpperCamelCase ) while lo < hi: __lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: __lowerCAmelCase : Dict = mid + 1 else: __lowerCAmelCase : str = mid return lo def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ): sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : List[Any] = 0 __lowerCAmelCase : int = len(_UpperCamelCase ) - 1 while left <= right: __lowerCAmelCase : List[Any] = left + (right - left) // 2 __lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: __lowerCAmelCase : Tuple = midpoint - 1 else: __lowerCAmelCase : str = midpoint + 1 return None def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): __lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase ) if index != len(_UpperCamelCase ) and sorted_collection[index] == item: return index return None def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): if right < left: return None __lowerCAmelCase : List[str] = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 ) else: return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase ) if __name__ == "__main__": lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip() lowerCamelCase__ = sorted(int(item) for item in user_input.split(""",""")) lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n""")) lowerCamelCase__ = binary_search(collection, target) if result is None: print(f'{target} was not found in {collection}.') else: print(f'{target} was found at position {result} in {collection}.')
86
1
"""simple docstring""" import fire from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ): __lowerCAmelCase : List[Any] = AutoConfig.from_pretrained(_UpperCamelCase , **_UpperCamelCase ) __lowerCAmelCase : Tuple = AutoModelForSeqaSeqLM.from_config(_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) AutoTokenizer.from_pretrained(_UpperCamelCase ).save_pretrained(_UpperCamelCase ) return model if __name__ == "__main__": fire.Fire(save_randomly_initialized_version)
86
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available, is_torch_available from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow if is_tf_available(): from transformers import ( AutoConfig, BertConfig, GPTaConfig, TaConfig, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeqaSeqLM, TFAutoModelForSequenceClassification, TFAutoModelWithLMHead, TFBertForMaskedLM, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertModel, TFGPTaLMHeadModel, TFRobertaForMaskedLM, TFTaForConditionalGeneration, ) from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST if is_torch_available(): from transformers import ( AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeqaSeqLM, AutoModelForSequenceClassification, AutoModelWithLMHead, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertModel, GPTaLMHeadModel, RobertaForMaskedLM, TaForConditionalGeneration, ) @is_pt_tf_cross_test class A__ ( unittest.TestCase): @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[str] = 
TFAutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained( _SCREAMING_SNAKE_CASE , 
output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: for model_name in ["bert-base-uncased"]: __lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) def __lowerCamelCase ( self ): __lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) __lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual(model.num_parameters() , 1_44_10 ) self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
86
1
"""simple docstring""" def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ): if mass < 0: raise ValueError('The mass of a body cannot be negative' ) return 0.5 * mass * abs(_UpperCamelCase ) * abs(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
86
"""simple docstring""" from typing import List, Optional, Union import torch from transformers import ( XLMRobertaTokenizer, ) from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDIMScheduler, DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) from .text_encoder import MultilingualCLIP lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name lowerCamelCase__ = """ Examples: ```py >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline >>> import torch >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\") >>> pipe_prior.to(\"cuda\") >>> prompt = \"red cat, 4k photo\" >>> out = pipe_prior(prompt) >>> image_emb = out.image_embeds >>> negative_image_emb = out.negative_image_embeds >>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\") >>> pipe.to(\"cuda\") >>> image = pipe( ... prompt, ... image_embeds=image_emb, ... negative_image_embeds=negative_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... ).images >>> image[0].save(\"cat.png\") ``` """ def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ): __lowerCAmelCase : Dict = h // scale_factor**2 if h % scale_factor**2 != 0: new_h += 1 __lowerCAmelCase : List[str] = w // scale_factor**2 if w % scale_factor**2 != 0: new_w += 1 return new_h * scale_factor, new_w * scale_factor class A__ ( _lowerCamelCase): def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ): super().__init__() self.register_modules( text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if latents is None: __lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE ) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" ) __lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma return latents def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ): __lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1 # get prompt text embeddings __lowerCAmelCase : Dict = self.tokenizer( _SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) __lowerCAmelCase : Tuple = text_inputs.input_ids __lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): 
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] ) logger.warning( 'The following part of your input was truncated because CLIP can only handle sequences up to' f" {self.tokenizer.model_max_length} tokens: {removed_text}" ) __lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder( input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase : List[str] if negative_prompt is None: __lowerCAmelCase : Union[str, Any] = [''] * batch_size elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !=" f" {type(_SCREAMING_SNAKE_CASE )}." ) elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(_SCREAMING_SNAKE_CASE ): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" ' the batch size of `prompt`.' ) else: __lowerCAmelCase : Optional[int] = negative_prompt __lowerCAmelCase : Tuple = self.tokenizer( _SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , ) __lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder( input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE ) # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1] __lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1] __lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 ) __lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view( batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 ) __lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) # done duplicates # For classifier free guidance, we need to do two forward passes. 
# Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] ) __lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] ) __lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] ) return prompt_embeds, text_encoder_hidden_states, text_mask def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ): if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) __lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" ) __lowerCAmelCase : List[Any] = [ self.unet, self.text_encoder, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ): if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) __lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowerCAmelCase : Any = None for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]: __lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE ) if self.safety_checker is not None: __lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE ) # We'll offload the last model manually. 
__lowerCAmelCase : Optional[Any] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def __lowerCamelCase ( self ): if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(_SCREAMING_SNAKE_CASE ) def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ): if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : List[str] = 1 elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE ) else: raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" ) __lowerCAmelCase : Dict = self._execution_device __lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt __lowerCAmelCase : Optional[int] = guidance_scale > 1.0 __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 ) if do_classifier_free_guidance: __lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 ) __lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to( dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE ) self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = self.scheduler.timesteps __lowerCAmelCase : int = self.unet.config.in_channels __lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor ) # create initial latent __lowerCAmelCase : str = self.prepare_latents( (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , ) for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ): # expand the latents if we are doing classifier free guidance __lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds} __lowerCAmelCase : Optional[Any] = self.unet( sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0] if do_classifier_free_guidance: __lowerCAmelCase , __lowerCAmelCase : 
Dict = noise_pred.split(latents.shape[1] , dim=1 ) __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 ) __lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 ) __lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __lowerCAmelCase : List[str] = self.scheduler.step( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample # post-processing __lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" ) if output_type in ["np", "pil"]: __lowerCAmelCase : List[str] = image * 0.5 + 0.5 __lowerCAmelCase : Dict = image.clamp(0 , 1 ) __lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE ) if not return_dict: return (image,) return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE )
86
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor import transformers from transformers import ( CONFIG_MAPPING, IMAGE_PROCESSOR_MAPPING, MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING, AutoConfig, AutoImageProcessor, AutoModelForMaskedImageModeling, HfArgumentParser, Trainer, TrainingArguments, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCamelCase__ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("""4.31.0""") require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""") lowerCamelCase__ = list(MODEL_FOR_MASKED_IMAGE_MODELING_MAPPING.keys()) lowerCamelCase__ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class A__ : A_ : Optional[str] = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'}) A_ : Optional[str] = field( default=_lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}) A_ : Optional[str] = field( default=_lowerCamelCase , metadata={'help': 'The column name of the images in the files. If not set, will try to use \'image\' or \'img\'.'} , ) A_ : Optional[str] = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the training data.'}) A_ : Optional[str] = field(default=_lowerCamelCase , metadata={'help': 'A folder containing the validation data.'}) A_ : Optional[float] = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'}) A_ : int = field(default=3_2 , metadata={'help': 'The size of the square patches to use for masking.'}) A_ : float = field( default=0.6 , metadata={'help': 'Percentage of patches to mask.'} , ) A_ : Optional[int] = field( default=_lowerCamelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) A_ : Optional[int] = field( default=_lowerCamelCase , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[int] = {} if self.train_dir is not None: __lowerCAmelCase : Dict = self.train_dir if self.validation_dir is not None: __lowerCAmelCase : Dict = self.validation_dir __lowerCAmelCase : str = data_files if data_files else None @dataclass class A__ : A_ : str = field( default=_lowerCamelCase , metadata={ 'help': ( 'The model checkpoint for weights initialization. Can be a local path to a pytorch_model.bin or a ' 'checkpoint identifier on the hub. ' 'Don\'t set if you want to train a model from scratch.' 
) } , ) A_ : Optional[str] = field( default=_lowerCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_lowerCamelCase)} , ) A_ : Optional[str] = field( default=_lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'}) A_ : Optional[str] = field( default=_lowerCamelCase , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) A_ : Optional[str] = field( default=_lowerCamelCase , metadata={'help': 'Where do you want to store (cache) the pretrained models/datasets downloaded from the hub'} , ) A_ : str = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) A_ : str = field(default=_lowerCamelCase , metadata={'help': 'Name or path of preprocessor config.'}) A_ : bool = field( default=_lowerCamelCase , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' ) } , ) A_ : Optional[int] = field( default=_lowerCamelCase , metadata={ 'help': ( 'The size (resolution) of each image. If not specified, will use `image_size` of the configuration.' ) } , ) A_ : Optional[int] = field( default=_lowerCamelCase , metadata={ 'help': ( 'The size (resolution) of each patch. If not specified, will use `patch_size` of the configuration.' ) } , ) A_ : Optional[int] = field( default=_lowerCamelCase , metadata={'help': 'Stride to use for the encoder.'} , ) class A__ : def __init__( self , _SCREAMING_SNAKE_CASE=1_92 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.6 ): __lowerCAmelCase : List[str] = input_size __lowerCAmelCase : str = mask_patch_size __lowerCAmelCase : Tuple = model_patch_size __lowerCAmelCase : int = mask_ratio if self.input_size % self.mask_patch_size != 0: raise ValueError('Input size must be divisible by mask patch size' ) if self.mask_patch_size % self.model_patch_size != 0: raise ValueError('Mask patch size must be divisible by model patch size' ) __lowerCAmelCase : str = self.input_size // self.mask_patch_size __lowerCAmelCase : Dict = self.mask_patch_size // self.model_patch_size __lowerCAmelCase : Union[str, Any] = self.rand_size**2 __lowerCAmelCase : Any = int(np.ceil(self.token_count * self.mask_ratio ) ) def __call__( self ): __lowerCAmelCase : List[Any] = np.random.permutation(self.token_count )[: self.mask_count] __lowerCAmelCase : Union[str, Any] = np.zeros(self.token_count , dtype=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = 1 __lowerCAmelCase : Any = mask.reshape((self.rand_size, self.rand_size) ) __lowerCAmelCase : int = mask.repeat(self.scale , axis=0 ).repeat(self.scale , axis=1 ) return torch.tensor(mask.flatten() ) def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : Optional[int] = torch.stack([example['pixel_values'] for example in examples] ) __lowerCAmelCase : int = torch.stack([example['mask'] for example in examples] ) return {"pixel_values": pixel_values, "bool_masked_pos": mask} def __lowerCAmelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. 
__lowerCAmelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mim' , _UpperCamelCase , _UpperCamelCase ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() __lowerCAmelCase : Dict = training_args.get_process_log_level() logger.setLevel(_UpperCamelCase ) transformers.utils.logging.set_verbosity(_UpperCamelCase ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + F"distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}" ) logger.info(F"Training/evaluation parameters {training_args}" ) # Detecting last checkpoint. __lowerCAmelCase : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: __lowerCAmelCase : Optional[Any] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"Output directory ({training_args.output_dir}) already exists and is not empty. " 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. __lowerCAmelCase : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. __lowerCAmelCase : Dict = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , _UpperCamelCase ) and data_args.train_val_split > 0.0: __lowerCAmelCase : int = ds['train'].train_test_split(data_args.train_val_split ) __lowerCAmelCase : Optional[Any] = split['train'] __lowerCAmelCase : Dict = split['test'] # Create config # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowerCAmelCase : List[str] = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name_or_path: __lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name_or_path , **_UpperCamelCase ) elif model_args.model_name_or_path: __lowerCAmelCase : Dict = AutoConfig.from_pretrained(model_args.model_name_or_path , **_UpperCamelCase ) else: __lowerCAmelCase : Optional[int] = CONFIG_MAPPING[model_args.model_type]() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F"Overriding config: {model_args.config_overrides}" ) config.update_from_string(model_args.config_overrides ) logger.info(F"New config: {config}" ) # make sure the decoder_type is "simmim" (only relevant for BEiT) if hasattr(_UpperCamelCase , 'decoder_type' ): __lowerCAmelCase : List[str] = 'simmim' # adapt config __lowerCAmelCase : List[str] = model_args.image_size if model_args.image_size is not None else config.image_size __lowerCAmelCase : Tuple = model_args.patch_size if model_args.patch_size is not None else config.patch_size __lowerCAmelCase : Dict = ( model_args.encoder_stride if model_args.encoder_stride is not None else config.encoder_stride ) config.update( { 'image_size': model_args.image_size, 'patch_size': model_args.patch_size, 'encoder_stride': model_args.encoder_stride, } ) # create image processor if model_args.image_processor_name: __lowerCAmelCase : Any = AutoImageProcessor.from_pretrained(model_args.image_processor_name , **_UpperCamelCase ) elif model_args.model_name_or_path: __lowerCAmelCase : Dict = AutoImageProcessor.from_pretrained(model_args.model_name_or_path , **_UpperCamelCase ) else: __lowerCAmelCase : Optional[Any] = { conf.model_type: image_processor_class for conf, image_processor_class in IMAGE_PROCESSOR_MAPPING.items() } __lowerCAmelCase : Optional[Any] = IMAGE_PROCESSOR_TYPES[model_args.model_type]() # create model if model_args.model_name_or_path: __lowerCAmelCase : Optional[Any] = AutoModelForMaskedImageModeling.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=_UpperCamelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) __lowerCAmelCase : Optional[Any] = AutoModelForMaskedImageModeling.from_config(_UpperCamelCase ) if training_args.do_train: __lowerCAmelCase : Any = ds['train'].column_names else: __lowerCAmelCase : List[str] = ds['validation'].column_names if data_args.image_column_name is not None: __lowerCAmelCase : List[Any] = data_args.image_column_name elif "image" in column_names: __lowerCAmelCase : Dict = 'image' elif "img" in column_names: __lowerCAmelCase : Optional[int] = 'img' else: __lowerCAmelCase : Dict = column_names[0] # transformations as done in original SimMIM paper # source: https://github.com/microsoft/SimMIM/blob/main/data/data_simmim.py __lowerCAmelCase : Optional[Any] = Compose( [ Lambda(lambda _UpperCamelCase : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(model_args.image_size , scale=(0.67, 1.0) , ratio=(3.0 / 4.0, 4.0 / 3.0) ), RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) # create mask generator __lowerCAmelCase : List[str] = MaskGenerator( 
input_size=model_args.image_size , mask_patch_size=data_args.mask_patch_size , model_patch_size=model_args.patch_size , mask_ratio=data_args.mask_ratio , ) def preprocess_images(_UpperCamelCase ): __lowerCAmelCase : List[Any] = [transforms(_UpperCamelCase ) for image in examples[image_column_name]] __lowerCAmelCase : int = [mask_generator() for i in range(len(examples[image_column_name] ) )] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: __lowerCAmelCase : Optional[Any] = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(_UpperCamelCase ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: __lowerCAmelCase : Union[str, Any] = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(_UpperCamelCase ) # Initialize our trainer __lowerCAmelCase : List[str] = Trainer( model=_UpperCamelCase , args=_UpperCamelCase , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=_UpperCamelCase , data_collator=_UpperCamelCase , ) # Training if training_args.do_train: __lowerCAmelCase : Optional[int] = None if training_args.resume_from_checkpoint is not None: __lowerCAmelCase : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: __lowerCAmelCase : str = last_checkpoint __lowerCAmelCase : Union[str, Any] = trainer.train(resume_from_checkpoint=_UpperCamelCase ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: __lowerCAmelCase : List[str] = trainer.evaluate() trainer.log_metrics('eval' , _UpperCamelCase ) trainer.save_metrics('eval' , _UpperCamelCase ) # Write model card and (optionally) push to hub __lowerCAmelCase : int = { 'finetuned_from': model_args.model_name_or_path, 'tasks': 'masked-image-modeling', 'dataset': data_args.dataset_name, 'tags': ['masked-image-modeling'], } if training_args.push_to_hub: trainer.push_to_hub(**_UpperCamelCase ) else: trainer.create_model_card(**_UpperCamelCase ) if __name__ == "__main__": main()
86
"""simple docstring""" import unittest from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers @require_sentencepiece @slow # see https://github.com/huggingface/transformers/issues/11457 class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Union[str, Any] = BarthezTokenizer A_ : Tuple = BarthezTokenizerFast A_ : Dict = True A_ : List[str] = True def __lowerCamelCase ( self ): super().setUp() __lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' ) tokenizer.save_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = tokenizer def __lowerCamelCase ( self ): __lowerCAmelCase : Optional[Any] = '<pad>' __lowerCAmelCase : Union[str, Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): __lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 ) def __lowerCamelCase ( self ): self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 ) @require_torch def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.'] __lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2] __lowerCAmelCase : Optional[int] = self.tokenizer( _SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' ) self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) self.assertEqual((2, 6) , batch.input_ids.shape ) self.assertEqual((2, 6) , batch.attention_mask.shape ) __lowerCAmelCase : List[str] = batch.input_ids.tolist()[0] self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self ): if not self.test_rust_tokenizer: return __lowerCAmelCase : Tuple = self.get_tokenizer() __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.' 
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[int] = self.get_rust_tokenizer() __lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE ) self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) @slow def __lowerCamelCase ( self ): # fmt: off __lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # moussaKam/mbarthez is a french model. So we also use french texts. __lowerCAmelCase : Union[str, Any] = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
86
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_mobilebert import MobileBertTokenizer lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} lowerCamelCase__ = { """vocab_file""": {"""mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"""}, """tokenizer_file""": { """mobilebert-uncased""": """https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json""" }, } lowerCamelCase__ = {"""mobilebert-uncased""": 512} lowerCamelCase__ = {} class A__ ( _lowerCamelCase): A_ : Optional[int] = VOCAB_FILES_NAMES A_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP A_ : List[Any] = PRETRAINED_INIT_CONFIGURATION A_ : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES A_ : Optional[Any] = MobileBertTokenizer def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ): super().__init__( _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' , _SCREAMING_SNAKE_CASE ) != do_lower_case or normalizer_state.get('strip_accents' , _SCREAMING_SNAKE_CASE ) != strip_accents or normalizer_state.get('handle_chinese_chars' , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars ): __lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop('type' ) ) __lowerCAmelCase : List[Any] = do_lower_case __lowerCAmelCase : int = strip_accents __lowerCAmelCase : List[Any] = tokenize_chinese_chars __lowerCAmelCase : str = normalizer_class(**_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : str = do_lower_case def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ): __lowerCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): __lowerCAmelCase : Any = [self.sep_token_id] __lowerCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ): __lowerCAmelCase : Dict = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE ) return tuple(_SCREAMING_SNAKE_CASE )
86
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { """sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class A__ ( _lowerCamelCase): A_ : Optional[int] = 'poolformer' def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ): __lowerCAmelCase : int = num_channels __lowerCAmelCase : str = patch_size __lowerCAmelCase : Optional[Any] = stride __lowerCAmelCase : Optional[int] = padding __lowerCAmelCase : List[Any] = pool_size __lowerCAmelCase : int = hidden_sizes __lowerCAmelCase : str = mlp_ratio __lowerCAmelCase : Optional[int] = depths __lowerCAmelCase : str = patch_sizes __lowerCAmelCase : str = strides __lowerCAmelCase : Optional[int] = num_encoder_blocks __lowerCAmelCase : Any = drop_path_rate __lowerCAmelCase : Any = hidden_act __lowerCAmelCase : Dict = use_layer_scale __lowerCAmelCase : Union[str, Any] = layer_scale_init_value __lowerCAmelCase : Dict = initializer_range super().__init__(**_SCREAMING_SNAKE_CASE ) class A__ ( _lowerCamelCase): A_ : List[str] = version.parse('1.11') @property def __lowerCamelCase ( self ): return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowerCamelCase ( self ): return 2E-3
86
1
"""simple docstring""" from collections.abc import Callable from math import pi, sqrt from random import uniform from statistics import mean def __lowerCAmelCase (_UpperCamelCase ): # A local function to see if a dot lands in the circle. def is_in_circle(_UpperCamelCase , _UpperCamelCase ) -> bool: __lowerCAmelCase : str = sqrt((x**2) + (y**2) ) # Our circle has a radius of 1, so a distance # greater than 1 would land outside the circle. return distance_from_centre <= 1 # The proportion of guesses that landed in the circle __lowerCAmelCase : Optional[int] = mean( int(is_in_circle(uniform(-1.0 , 1.0 ) , uniform(-1.0 , 1.0 ) ) ) for _ in range(_UpperCamelCase ) ) # The ratio of the area for circle to square is pi/4. __lowerCAmelCase : Dict = proportion * 4 print(F"The estimated value of pi is {pi_estimate}" ) print(F"The numpy value of pi is {pi}" ) print(F"The total error is {abs(pi - pi_estimate )}" ) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0.0 , _UpperCamelCase = 1.0 , ): return mean( function_to_integrate(uniform(_UpperCamelCase , _UpperCamelCase ) ) for _ in range(_UpperCamelCase ) ) * (max_value - min_value) def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = 0.0 , _UpperCamelCase = 1.0 ): def identity_function(_UpperCamelCase ) -> float: return x __lowerCAmelCase : Dict = area_under_curve_estimator( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase : int = (max_value * max_value - min_value * min_value) / 2 print('******************' ) print(F"Estimating area under y=x where x varies from {min_value} to {max_value}" ) print(F"Estimated value is {estimated_value}" ) print(F"Expected value is {expected_value}" ) print(F"Total error is {abs(estimated_value - expected_value )}" ) print('******************' ) def __lowerCAmelCase (_UpperCamelCase ): def function_to_integrate(_UpperCamelCase ) -> float: return sqrt(4.0 - x * x ) __lowerCAmelCase : Tuple = area_under_curve_estimator( _UpperCamelCase , _UpperCamelCase , 0.0 , 2.0 ) print('******************' ) print('Estimating pi using area_under_curve_estimator' ) print(F"Estimated value is {estimated_value}" ) print(F"Expected value is {pi}" ) print(F"Total error is {abs(estimated_value - pi )}" ) print('******************' ) if __name__ == "__main__": import doctest doctest.testmod()
86
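A small driver for the Monte Carlo estimators above; the sample counts and the extra quarter-circle integrand are illustrative assumptions. The error of each estimate shrinks roughly as 1/sqrt(n).

# Illustrative driver for the Monte Carlo estimators defined above.
# Sample counts are arbitrary; accuracy grows slowly with n.
from math import sqrt

pi_estimator(100_000)                         # dart-throwing estimate of pi
area_under_line_estimator_check(100_000)      # sanity check against the exact area of y = x
pi_estimator_using_area_under_curve(100_000)  # pi via the quarter-circle integral

# A custom integrand also works with the generic estimator:
approx = area_under_curve_estimator(100_000, lambda x: sqrt(1.0 - x * x), 0.0, 1.0)
print(f"Quarter-circle area estimate: {approx} (exact: pi/4)")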
"""simple docstring""" import gc import unittest import numpy as np import torch from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS, CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class A__ ( _lowerCamelCase , unittest.TestCase): A_ : Union[str, Any] = DiTPipeline A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS A_ : List[Any] = PipelineTesterMixin.required_optional_params - { 'latents', 'num_images_per_prompt', 'callback', 'callback_steps', } A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS A_ : Tuple = False def __lowerCamelCase ( self ): torch.manual_seed(0 ) __lowerCAmelCase : List[str] = TransformeraDModel( sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , ) __lowerCAmelCase : str = AutoencoderKL() __lowerCAmelCase : Union[str, Any] = DDIMScheduler() __lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler} return components def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ): if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ): __lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = { 'class_labels': [1], 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def __lowerCamelCase ( self ): __lowerCAmelCase : List[str] = 'cpu' __lowerCAmelCase : Any = self.get_dummy_components() __lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE ) pipe.to(_SCREAMING_SNAKE_CASE ) pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images __lowerCAmelCase : List[str] = image[0, -3:, -3:, -1] self.assertEqual(image.shape , (1, 16, 16, 3) ) __lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] ) __lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max() self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 ) def __lowerCamelCase ( self ): self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , ) def __lowerCamelCase ( self ): self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 ) @require_torch_gpu @slow class A__ ( unittest.TestCase): def __lowerCamelCase ( self ): super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCamelCase ( self ): __lowerCAmelCase : Dict = torch.manual_seed(0 ) __lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' ) 
pipe.to('cuda' ) __lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf'] __lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = load_numpy( f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" ) assert np.abs((expected_image - image).max() ) < 1E-2 def __lowerCamelCase ( self ): __lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' ) __lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config ) pipe.to('cuda' ) __lowerCAmelCase : Dict = ['vase', 'umbrella'] __lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE ) __lowerCAmelCase : Any = torch.manual_seed(0 ) __lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Dict = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' f"/dit/{word}_512.npy" ) assert np.abs((expected_image - image).max() ) < 1E-1
86
1
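To close the loop on the DiT tests above, a hedged sketch of class-conditional sampling outside unittest. The model id and label names come from the tests themselves; fp16 and the DPM-Solver swap are assumptions borrowed from common diffusers usage, not requirements.

# Hedged sketch: class-conditional sampling with DiT, mirroring the slow
# tests above. fp16 and the scheduler swap are illustrative choices.
import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256", torch_dtype=torch.float16)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to("cuda")

words = ["white shark", "umbrella"]
class_ids = pipe.get_label_ids(words)  # ImageNet label names -> class ids

generator = torch.manual_seed(33)
images = pipe(class_labels=class_ids, generator=generator, num_inference_steps=25).images
for word, image in zip(words, images):
    image.save(f"{word.replace(' ', '_')}.png")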