Dataset schema — each record below contains five fields:

| column                    | type   | values           |
|---------------------------|--------|------------------|
| `code`                    | string | 86 – 54.5k chars |
| `code_codestyle`          | int64  | 0 – 371          |
| `style_context`           | string | 87 – 49.2k chars |
| `style_context_codestyle` | int64  | 0 – 349          |
| `label`                   | int64  | 0 – 1            |
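A minimal sketch of loading and inspecting a dataset with this schema via the `datasets` library; the Hub identifier `user/code-style-pairs` is a hypothetical placeholder, not the dataset's real name.

```python
from datasets import load_dataset

# Hypothetical dataset identifier; substitute the actual Hub name.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # preview the (flattened) source string
```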
"""simple docstring""" import argparse import torch from ...utils import logging from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert logging.set_verbosity_info() def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Any ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = AlbertConfig.from_json_file(_UpperCamelCase ) print(f'''Building PyTorch model from configuration: {config}''' ) __UpperCAmelCase : str = AlbertForPreTraining(_UpperCamelCase ) # Load weights from tf checkpoint load_tf_weights_in_albert(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--albert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained ALBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase : Any = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
code_codestyle: 320
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
style_context_codestyle: 320
label: 1
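The `POOLING_BREAKDOWN` table in the `style_context` above maps a requested number of image embeddings to an adaptive-pooling grid; the sample's `nn.AdaptiveAvgPoolad` appears to be an obfuscated `nn.AdaptiveAvgPool2d`. A minimal sketch, with assumed feature-map shapes, of how that turns a ResNet feature map into N embeddings:

```python
import torch
from torch import nn

# Assumed shapes: a ResNet backbone with its last two children stripped emits
# (batch, 2048, 7, 7) feature maps for 224x224 inputs.
features = torch.randn(4, 2048, 7, 7)
pool = nn.AdaptiveAvgPool2d((3, 1))     # POOLING_BREAKDOWN[3]
out = pool(features)                    # -> (4, 2048, 3, 1)
out = torch.flatten(out, start_dim=2)   # -> (4, 2048, 3)
out = out.transpose(1, 2).contiguous()  # -> (4, 3, 2048): three 2048-dim image embeddings
```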
"""simple docstring""" UpperCAmelCase : List[str] = 256 # Modulus to hash a string UpperCAmelCase : Optional[int] = 100_0003 def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> bool: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : str = len(_UpperCamelCase ) if p_len > t_len: return False __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : int = 1 # Calculating the hash of pattern and substring of text for i in range(_UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus __UpperCAmelCase : Optional[int] = (ord(text[i] ) + text_hash * alphabet_size) % modulus if i == p_len - 1: continue __UpperCAmelCase : str = (modulus_power * alphabet_size) % modulus for i in range(0 , t_len - p_len + 1 ): if text_hash == p_hash and text[i : i + p_len] == pattern: return True if i == t_len - p_len: continue # Calculate the https://en.wikipedia.org/wiki/Rolling_hash __UpperCAmelCase : int = ( (text_hash - ord(text[i] ) * modulus_power) * alphabet_size + ord(text[i + p_len] ) ) % modulus return False def lowerCamelCase ( ) -> None: '''simple docstring''' __UpperCAmelCase : int = """abc1abc12""" __UpperCAmelCase : Any = """alskfjaldsabc1abc1abc12k23adsfabcabc""" __UpperCAmelCase : Union[str, Any] = """alskfjaldsk23adsfabcabc""" assert rabin_karp(_UpperCamelCase , _UpperCamelCase ) and not rabin_karp(_UpperCamelCase , _UpperCamelCase ) # Test 2) __UpperCAmelCase : Optional[Any] = """ABABX""" __UpperCAmelCase : Union[str, Any] = """ABABZABABYABABX""" assert rabin_karp(_UpperCamelCase , _UpperCamelCase ) # Test 3) __UpperCAmelCase : int = """AAAB""" __UpperCAmelCase : Any = """ABAAAAAB""" assert rabin_karp(_UpperCamelCase , _UpperCamelCase ) # Test 4) __UpperCAmelCase : Tuple = """abcdabcy""" __UpperCAmelCase : Optional[Any] = """abcxabcdabxabcdabcdabcy""" assert rabin_karp(_UpperCamelCase , _UpperCamelCase ) # Test 5) __UpperCAmelCase : Any = """Lü""" __UpperCAmelCase : Optional[int] = """Lüsai""" assert rabin_karp(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Any = """Lue""" assert not rabin_karp(_UpperCamelCase , _UpperCamelCase ) print("""Success.""" ) if __name__ == "__main__": test_rabin_karp()
code_codestyle: 320
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
style_context_codestyle: 320
label: 1
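The Rabin–Karp sample in the record above slides a window hash across the text. A small self-check, written as a sketch with the same base/modulus constants, that the rolling update (drop the leading character, shift, append the next one) matches a hash computed from scratch:

```python
ALPHABET_SIZE, MODULUS = 256, 1_000_003

def window_hash(s):
    # Polynomial hash, same recurrence the sample uses for pattern and text.
    h = 0
    for ch in s:
        h = (ord(ch) + h * ALPHABET_SIZE) % MODULUS
    return h

text, m = "abcdefgh", 3
power = pow(ALPHABET_SIZE, m - 1, MODULUS)  # weight of the window's leading character
h = window_hash(text[:m])
for i in range(len(text) - m):
    # Remove text[i], shift the remaining characters, append text[i + m].
    h = ((h - ord(text[i]) * power) * ALPHABET_SIZE + ord(text[i + m])) % MODULUS
    assert h == window_hash(text[i + 1 : i + 1 + m])
```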
"""simple docstring""" import shutil import tempfile import unittest import numpy as np import pytest from transformers import is_speech_available, is_vision_available from transformers.testing_utils import require_torch if is_vision_available(): from transformers import TvltImageProcessor if is_speech_available(): from transformers import TvltFeatureExtractor from transformers import TvltProcessor @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[int] = """ZinengTang/tvlt-base""" __UpperCAmelCase : Optional[int] = tempfile.mkdtemp() def lowerCamelCase__ ( self : Union[str, Any] , **UpperCamelCase : int ): '''simple docstring''' return TvltImageProcessor.from_pretrained(self.checkpoint , **UpperCamelCase ) def lowerCamelCase__ ( self : int , **UpperCamelCase : Any ): '''simple docstring''' return TvltFeatureExtractor.from_pretrained(self.checkpoint , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Tuple = self.get_image_processor() __UpperCAmelCase : int = self.get_feature_extractor() __UpperCAmelCase : int = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase ) processor.save_pretrained(self.tmpdirname ) __UpperCAmelCase : Optional[Any] = TvltProcessor.from_pretrained(self.tmpdirname ) self.assertIsInstance(processor.feature_extractor , UpperCamelCase ) self.assertIsInstance(processor.image_processor , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : int = self.get_image_processor() __UpperCAmelCase : int = self.get_feature_extractor() __UpperCAmelCase : Tuple = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase ) __UpperCAmelCase : Tuple = np.ones([12_000] ) __UpperCAmelCase : List[Any] = feature_extractor(UpperCamelCase , return_tensors="""np""" ) __UpperCAmelCase : Optional[Any] = processor(audio=UpperCamelCase , return_tensors="""np""" ) for key in audio_dict.keys(): self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.get_image_processor() __UpperCAmelCase : Optional[int] = self.get_feature_extractor() __UpperCAmelCase : List[Any] = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase ) __UpperCAmelCase : str = np.ones([3, 224, 224] ) __UpperCAmelCase : Any = image_processor(UpperCamelCase , return_tensors="""np""" ) __UpperCAmelCase : List[Any] = processor(images=UpperCamelCase , return_tensors="""np""" ) for key in image_dict.keys(): self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1e-2 ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = self.get_image_processor() __UpperCAmelCase : Dict = self.get_feature_extractor() __UpperCAmelCase : Tuple = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase ) __UpperCAmelCase : Dict = np.ones([12_000] ) __UpperCAmelCase : Union[str, Any] = np.ones([3, 224, 224] ) __UpperCAmelCase : Dict = processor(audio=UpperCamelCase , images=UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) , ["""audio_values""", """audio_mask""", """pixel_values""", """pixel_mask"""] ) # test 
if it raises when no input is passed with pytest.raises(UpperCamelCase ): processor() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = self.get_image_processor() __UpperCAmelCase : str = self.get_feature_extractor() __UpperCAmelCase : Tuple = TvltProcessor(image_processor=UpperCamelCase , feature_extractor=UpperCamelCase ) self.assertListEqual( processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg="""`processor` and `image_processor`+`feature_extractor` model input names do not match""" , )
code_codestyle: 320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
style_context_codestyle: 320
label: 1
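The `style_context` in the record above computes the minimum subset-sum partition difference with a boolean DP table. A readable restatement of the same idea (a sketch, not the dataset's code) using a set of reachable subset sums:

```python
def min_partition_difference(arr):
    # j is "reachable" when some subset of arr sums to j; the best split
    # minimizes total - 2*j over reachable j <= total // 2.
    total = sum(arr)
    reachable = {0}
    for value in arr:
        reachable |= {r + value for r in reachable}
    best = max(j for j in reachable if j <= total // 2)
    return total - 2 * best

assert min_partition_difference([1, 6, 11, 5]) == 1  # {1, 5, 6} vs {11}
```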
"""simple docstring""" import os import sys import unittest UpperCAmelCase : Any = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, 'utils')) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) UpperCAmelCase : Union[str, Any] = os.path.join('tests', 'models', 'bert', 'test_modeling_bert.py') UpperCAmelCase : Dict = os.path.join('tests', 'models', 'blip', 'test_modeling_blip.py') class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = get_test_to_tester_mapping(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = get_test_to_tester_mapping(UpperCamelCase ) __UpperCAmelCase : Optional[int] = {"""BertModelTest""": """BertModelTester"""} __UpperCAmelCase : Dict = { """BlipModelTest""": """BlipModelTester""", """BlipTextImageModelTest""": """BlipTextImageModelsModelTester""", """BlipTextModelTest""": """BlipTextModelTester""", """BlipTextRetrievalModelTest""": """BlipTextRetrievalModelTester""", """BlipVQAModelTest""": """BlipVQAModelTester""", """BlipVisionModelTest""": """BlipVisionModelTester""", } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Any = get_model_to_test_mapping(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = get_model_to_test_mapping(UpperCamelCase ) __UpperCAmelCase : Any = { """BertForMaskedLM""": ["""BertModelTest"""], """BertForMultipleChoice""": ["""BertModelTest"""], """BertForNextSentencePrediction""": ["""BertModelTest"""], """BertForPreTraining""": ["""BertModelTest"""], """BertForQuestionAnswering""": ["""BertModelTest"""], """BertForSequenceClassification""": ["""BertModelTest"""], """BertForTokenClassification""": ["""BertModelTest"""], """BertLMHeadModel""": ["""BertModelTest"""], """BertModel""": ["""BertModelTest"""], } __UpperCAmelCase : Any = { """BlipForConditionalGeneration""": ["""BlipTextImageModelTest"""], """BlipForImageTextRetrieval""": ["""BlipTextRetrievalModelTest"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTest"""], """BlipModel""": ["""BlipModelTest"""], """BlipTextModel""": ["""BlipTextModelTest"""], """BlipVisionModel""": ["""BlipVisionModelTest"""], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = get_model_to_tester_mapping(UpperCamelCase ) __UpperCAmelCase : Any = get_model_to_tester_mapping(UpperCamelCase ) __UpperCAmelCase : Any = { """BertForMaskedLM""": ["""BertModelTester"""], """BertForMultipleChoice""": ["""BertModelTester"""], """BertForNextSentencePrediction""": ["""BertModelTester"""], """BertForPreTraining""": ["""BertModelTester"""], """BertForQuestionAnswering""": ["""BertModelTester"""], """BertForSequenceClassification""": ["""BertModelTester"""], """BertForTokenClassification""": ["""BertModelTester"""], """BertLMHeadModel""": ["""BertModelTester"""], """BertModel""": ["""BertModelTester"""], } __UpperCAmelCase : Tuple = { """BlipForConditionalGeneration""": ["""BlipTextImageModelsModelTester"""], """BlipForImageTextRetrieval""": 
["""BlipTextRetrievalModelTester"""], """BlipForQuestionAnswering""": ["""BlipVQAModelTester"""], """BlipModel""": ["""BlipModelTester"""], """BlipTextModel""": ["""BlipTextModelTester"""], """BlipVisionModel""": ["""BlipVisionModelTester"""], } self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(get_test_info.to_json(UpperCamelCase ) , UpperCamelCase )
code_codestyle: 320
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
style_context_codestyle: 320
label: 1
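The image processor above applies, in order: RGB conversion, shortest-edge resize, center crop, 1/255 rescale, and mean/std normalization. A rough torchvision equivalent as a sketch; the CLIP mean/std constants are quoted from memory, not from this sample.

```python
from torchvision import transforms as T

# CLIP normalization constants (quoted from memory).
OPENAI_CLIP_MEAN = [0.48145466, 0.4578275, 0.40821073]
OPENAI_CLIP_STD = [0.26862954, 0.26130258, 0.27577711]

preprocess = T.Compose([
    T.Lambda(lambda im: im.convert("RGB")),  # do_convert_rgb
    T.Resize(224),                           # resize the shortest edge to 224
    T.CenterCrop(224),                       # crop_size = 224x224
    T.ToTensor(),                            # rescale by 1/255 into [0, 1]
    T.Normalize(mean=OPENAI_CLIP_MEAN, std=OPENAI_CLIP_STD),
])
```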
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
code_codestyle: 320
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
style_context_codestyle: 320
label: 1
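The polynomial sample above contrasts naive evaluation with Horner's rule, which needs only n multiply-adds by nesting the polynomial as (((c_n x + c_{n-1}) x + ...) x + c_0). A quick check using the coefficients from its `__main__` block:

```python
import math

def horner(coeffs, x):
    result = 0.0
    for c in reversed(coeffs):
        result = result * x + c
    return result

# 5x^2 + 9.3x^3 + 7x^4 at x = 10: 500 + 9300 + 70000 = 79800
assert math.isclose(horner((0.0, 0.0, 5.0, 9.3, 7.0), 10.0), 79800.0)
```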
"""simple docstring""" import numpy as np import torch from torch.utils.data import Dataset, IterableDataset from ..utils.generic import ModelOutput class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : int = dataset __UpperCAmelCase : Union[str, Any] = process __UpperCAmelCase : List[str] = params def __len__( self : List[str] ): '''simple docstring''' return len(self.dataset ) def __getitem__( self : str , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.dataset[i] __UpperCAmelCase : Optional[Any] = self.process(UpperCamelCase , **self.params ) return processed class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int]=None ): '''simple docstring''' __UpperCAmelCase : Optional[int] = loader __UpperCAmelCase : Dict = infer __UpperCAmelCase : Any = params if loader_batch_size == 1: # Let's spare some time by deactivating altogether __UpperCAmelCase : List[str] = None __UpperCAmelCase : Optional[int] = loader_batch_size # Internal bookkeeping __UpperCAmelCase : Dict = None __UpperCAmelCase : Any = None def __len__( self : int ): '''simple docstring''' return len(self.loader ) def __iter__( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = iter(self.loader ) return self def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' if isinstance(self._loader_batch_data , torch.Tensor ): # Batch data is simple tensor, just fetch the slice __UpperCAmelCase : int = self._loader_batch_data[self._loader_batch_index] else: # Batch data is assumed to be BaseModelOutput (or dict) __UpperCAmelCase : Union[str, Any] = {} for k, element in self._loader_batch_data.items(): if isinstance(UpperCamelCase , UpperCamelCase ): # Convert ModelOutput to tuple first __UpperCAmelCase : Any = element.to_tuple() if isinstance(element[0] , torch.Tensor ): __UpperCAmelCase : Union[str, Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __UpperCAmelCase : str = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(UpperCamelCase , UpperCamelCase ): # Those are stored as lists of tensors so need specific unbatching. 
if isinstance(element[0] , torch.Tensor ): __UpperCAmelCase : List[Any] = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element ) elif isinstance(element[0] , np.ndarray ): __UpperCAmelCase : Dict = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element ) continue if element is None: # This can happen for optional data that get passed around __UpperCAmelCase : Tuple = None elif isinstance(element[self._loader_batch_index] , torch.Tensor ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __UpperCAmelCase : Tuple = element[self._loader_batch_index].unsqueeze(0 ) elif isinstance(element[self._loader_batch_index] , np.ndarray ): # Take correct batch data, but make it looked like batch_size=1 # For compatibility with other methods within transformers __UpperCAmelCase : List[str] = np.expand_dims(element[self._loader_batch_index] , 0 ) else: # This is typically a list, so no need to `unsqueeze`. __UpperCAmelCase : List[Any] = element[self._loader_batch_index] # Recreate the element by reusing the original class to make it look # batch_size=1 __UpperCAmelCase : List[str] = self._loader_batch_data.__class__(UpperCamelCase ) self._loader_batch_index += 1 return result def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: # We are currently unrolling a batch so we just need to return # the current item within a batch return self.loader_batch_item() # We're out of items within a batch __UpperCAmelCase : List[Any] = next(self.iterator ) __UpperCAmelCase : int = self.infer(UpperCamelCase , **self.params ) # We now have a batch of "inferred things". if self.loader_batch_size is not None: # Try to infer the size of the batch if isinstance(UpperCamelCase , torch.Tensor ): __UpperCAmelCase : int = processed else: __UpperCAmelCase : Optional[Any] = list(processed.keys() )[0] __UpperCAmelCase : List[str] = processed[key] if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : int = len(UpperCamelCase ) else: __UpperCAmelCase : List[str] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __UpperCAmelCase : Dict = observed_batch_size # Setting internal index to unwrap the batch __UpperCAmelCase : List[str] = processed __UpperCAmelCase : int = 0 return self.loader_batch_item() else: # We're not unrolling batches return processed class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : int , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Optional[int]=None ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def __iter__( self : Tuple ): '''simple docstring''' __UpperCAmelCase : List[Any] = iter(self.loader ) __UpperCAmelCase : Optional[Any] = None return self def lowerCamelCase__ ( self : Dict ): '''simple docstring''' if self.subiterator is None: __UpperCAmelCase : Union[str, Any] = self.infer(next(self.iterator ) , **self.params ) try: # Try to return next item __UpperCAmelCase : List[str] = next(self.subiterator ) except StopIteration: # When a preprocess iterator ends, we can start lookig at the next item # ChunkIterator will keep feeding until ALL elements of iterator # all have created their subiterator and have been iterating against. 
# # Another way to look at it, is we're basically flattening lists of lists # into a single list, but with generators __UpperCAmelCase : str = self.infer(next(self.iterator ) , **self.params ) __UpperCAmelCase : Any = next(self.subiterator ) return processed class lowerCamelCase__ ( A ): """simple docstring""" def __iter__( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[Any] = iter(self.loader ) return self def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : str = False __UpperCAmelCase : str = [] if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size: while self._loader_batch_index < self.loader_batch_size: __UpperCAmelCase : Optional[int] = self.loader_batch_item() __UpperCAmelCase : List[str] = item.pop("""is_last""" ) accumulator.append(UpperCamelCase ) if is_last: return accumulator while not is_last: __UpperCAmelCase : Optional[Any] = self.infer(next(self.iterator ) , **self.params ) if self.loader_batch_size is not None: if isinstance(UpperCamelCase , torch.Tensor ): __UpperCAmelCase : Tuple = processed else: __UpperCAmelCase : Optional[int] = list(processed.keys() )[0] __UpperCAmelCase : List[str] = processed[key] if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Dict = len(UpperCamelCase ) else: __UpperCAmelCase : Optional[Any] = first_tensor.shape[0] if 0 < observed_batch_size < self.loader_batch_size: # could be last batch so we can't unroll as many # elements. __UpperCAmelCase : Optional[Any] = observed_batch_size __UpperCAmelCase : Dict = processed __UpperCAmelCase : Dict = 0 while self._loader_batch_index < self.loader_batch_size: __UpperCAmelCase : Union[str, Any] = self.loader_batch_item() __UpperCAmelCase : Tuple = item.pop("""is_last""" ) accumulator.append(UpperCamelCase ) if is_last: return accumulator else: __UpperCAmelCase : int = processed __UpperCAmelCase : Tuple = item.pop("""is_last""" ) accumulator.append(UpperCamelCase ) return accumulator class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : str , UpperCamelCase : Dataset , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[int] = dataset __UpperCAmelCase : Optional[Any] = key def __len__( self : Optional[int] ): '''simple docstring''' return len(self.dataset ) def __getitem__( self : List[str] , UpperCamelCase : Dict ): '''simple docstring''' return self.dataset[i][self.key] class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : List[str] , UpperCamelCase : Dataset , UpperCamelCase : str , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : List[str] = dataset __UpperCAmelCase : Tuple = keya __UpperCAmelCase : List[Any] = keya def __len__( self : List[str] ): '''simple docstring''' return len(self.dataset ) def __getitem__( self : List[str] , UpperCamelCase : List[str] ): '''simple docstring''' return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
code_codestyle: 320
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
1
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Optional[int]=13 , UpperCamelCase : Optional[int]=7 , UpperCamelCase : int=True , UpperCamelCase : Optional[Any]=True , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : Any=True , UpperCamelCase : int=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Any=2 , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Any=0 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Any=5 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Any=512 , UpperCamelCase : int=12 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Any=4 , UpperCamelCase : Optional[int]="last" , UpperCamelCase : Optional[int]=None , UpperCamelCase : Dict=None , ): '''simple docstring''' __UpperCAmelCase : List[str] = parent __UpperCAmelCase : List[Any] = batch_size __UpperCAmelCase : List[Any] = seq_length __UpperCAmelCase : Tuple = is_training __UpperCAmelCase : Optional[Any] = use_input_lengths __UpperCAmelCase : Dict = use_token_type_ids __UpperCAmelCase : List[Any] = use_labels __UpperCAmelCase : List[Any] = gelu_activation __UpperCAmelCase : Optional[Any] = sinusoidal_embeddings __UpperCAmelCase : Optional[Any] = causal __UpperCAmelCase : Any = asm __UpperCAmelCase : List[Any] = n_langs __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : List[Any] = n_special __UpperCAmelCase : Any = hidden_size __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : List[Any] = num_attention_heads __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : List[str] = max_position_embeddings __UpperCAmelCase : Any = type_vocab_size __UpperCAmelCase : int = type_sequence_label_size __UpperCAmelCase : str = initializer_range __UpperCAmelCase : Optional[int] = num_labels __UpperCAmelCase : Any = num_choices __UpperCAmelCase : str = summary_type __UpperCAmelCase : List[Any] = use_proj __UpperCAmelCase : List[Any] = scope def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Optional[Any] = None if self.use_input_lengths: __UpperCAmelCase : Tuple = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length __UpperCAmelCase : Optional[Any] = None if self.use_token_type_ids: __UpperCAmelCase : 
Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : List[str] = None __UpperCAmelCase : Any = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Any = ids_tensor([self.batch_size] , 2 ).float() __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : List[Any] = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaubertModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Any = model(UpperCamelCase , lengths=UpperCamelCase , langs=UpperCamelCase ) __UpperCAmelCase : Dict = model(UpperCamelCase , langs=UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : List[str] = FlaubertWithLMHeadModel(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : int , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = FlaubertForQuestionAnsweringSimple(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Optional[int] = model(UpperCamelCase ) __UpperCAmelCase : int = model(UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , 
(self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : List[str] , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : str = FlaubertForQuestionAnswering(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model( UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , cls_index=UpperCamelCase , is_impossible=UpperCamelCase , p_mask=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model( UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , cls_index=UpperCamelCase , is_impossible=UpperCamelCase , ) ((__UpperCAmelCase) ,) : Optional[Any] = result_with_labels.to_tuple() __UpperCAmelCase : Optional[Any] = model(UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase ) ((__UpperCAmelCase) ,) : str = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , ): '''simple docstring''' __UpperCAmelCase : Any = FlaubertForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : Any = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.num_labels __UpperCAmelCase : Tuple = FlaubertForTokenClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : str = model(UpperCamelCase , attention_mask=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , ): '''simple docstring''' __UpperCAmelCase : Any = self.num_choices __UpperCAmelCase : Dict = 
FlaubertForMultipleChoice(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Tuple = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Any = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : List[str] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Optional[Any] = config_and_inputs __UpperCAmelCase : List[str] = { """input_ids""": input_ids, """token_type_ids""": token_type_ids, """lengths""": input_lengths, """attention_mask""": input_mask, } return config, inputs_dict @require_torch class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) __a = ( { """feature-extraction""": FlaubertModel, """fill-mask""": FlaubertWithLMHeadModel, """question-answering""": FlaubertForQuestionAnsweringSimple, """text-classification""": FlaubertForSequenceClassification, """token-classification""": FlaubertForTokenClassification, """zero-shot""": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : str ): '''simple docstring''' if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith("""Fast""" ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. 
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[Any]=False ): '''simple docstring''' __UpperCAmelCase : Optional[int] = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": __UpperCAmelCase : Optional[int] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase ) __UpperCAmelCase : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase ) return inputs_dict def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = FlaubertModelTester(self ) __UpperCAmelCase : Any = ConfigTester(self , config_class=UpperCamelCase , emb_dim=37 ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*UpperCamelCase ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCamelCase ) @slow def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : Any = FlaubertModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow @require_torch_gpu def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == FlaubertForMultipleChoice: return __UpperCAmelCase : int = True __UpperCAmelCase : List[Any] = model_class(config=UpperCamelCase ) __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[str] = torch.jit.trace( UpperCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCamelCase , os.path.join(UpperCamelCase , """traced_model.pt""" ) ) __UpperCAmelCase : int = torch.jit.load(os.path.join(UpperCamelCase , """traced_model.pt""" ) , map_location=UpperCamelCase ) loaded(inputs_dict["""input_ids"""].to(UpperCamelCase ) , inputs_dict["""attention_mask"""].to(UpperCamelCase ) ) @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : str = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" ) __UpperCAmelCase : List[str] = torch.tensor([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) with torch.no_grad(): __UpperCAmelCase : Any = model(UpperCamelCase )[0] __UpperCAmelCase : Union[str, Any] = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , UpperCamelCase ) __UpperCAmelCase : Tuple = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase , atol=1e-4 ) )
320
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : Optional[int] = {'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : str = { 'tokenizer_file': { 'bigscience/tokenizer': 'https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json', 'bigscience/bloom-560m': 'https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json', 'bigscience/bloom-1b1': 'https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json', 'bigscience/bloom-1b7': 'https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json', 'bigscience/bloom-3b': 'https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json', 'bigscience/bloom-7b1': 'https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json', 'bigscience/bloom': 'https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json', }, } class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = ["""input_ids""", """attention_mask"""] __a = None def __init__( self : Optional[int] , UpperCamelCase : Dict=None , UpperCamelCase : Dict=None , UpperCamelCase : Any=None , UpperCamelCase : int="<unk>" , UpperCamelCase : int="<s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : str="<pad>" , UpperCamelCase : Tuple=False , UpperCamelCase : Tuple=False , **UpperCamelCase : Optional[Any] , ): '''simple docstring''' super().__init__( UpperCamelCase , UpperCamelCase , tokenizer_file=UpperCamelCase , unk_token=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , pad_token=UpperCamelCase , add_prefix_space=UpperCamelCase , clean_up_tokenization_spaces=UpperCamelCase , **UpperCamelCase , ) __UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("""add_prefix_space""" , UpperCamelCase ) != add_prefix_space: __UpperCAmelCase : int = getattr(UpperCamelCase , pre_tok_state.pop("""type""" ) ) __UpperCAmelCase : Union[str, Any] = add_prefix_space __UpperCAmelCase : List[Any] = pre_tok_class(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = add_prefix_space def lowerCamelCase__ ( self : Optional[Any] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : str = kwargs.get("""is_split_into_words""" , UpperCamelCase ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' """ pretokenized inputs.""" ) return super()._batch_encode_plus(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : Tuple , **UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : List[Any] = kwargs.get("""is_split_into_words""" , UpperCamelCase ) if not (self.add_prefix_space or not is_split_into_words): raise Exception( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with''' """ pretokenized inputs.""" ) return super()._encode_plus(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): 
'''simple docstring''' __UpperCAmelCase : int = self._tokenizer.model.save(UpperCamelCase , name=UpperCamelCase ) return tuple(UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : "Conversation" ): '''simple docstring''' __UpperCAmelCase : Optional[int] = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) + [self.eos_token_id] ) if len(UpperCamelCase ) > self.model_max_length: __UpperCAmelCase : Any = input_ids[-self.model_max_length :] return input_ids
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
"""simple docstring""" import re from filelock import FileLock try: import nltk UpperCAmelCase : List[str] = True except (ImportError, ModuleNotFoundError): UpperCAmelCase : Any = False if NLTK_AVAILABLE: with FileLock('.lock') as lock: nltk.download('punkt', quiet=True) def lowerCamelCase ( _UpperCamelCase : str ) -> str: '''simple docstring''' re.sub("""<n>""" , """""" , _UpperCamelCase ) # remove pegasus newline char assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)" return "\n".join(nltk.sent_tokenize(_UpperCamelCase ) )
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] ) -> Dict: '''simple docstring''' print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""" ) for i in range(_UpperCamelCase ): for j in range(_UpperCamelCase ): if dist[i][j] != float("""inf""" ): print(int(dist[i][j] ) , end="""\t""" ) else: print("""INF""" , end="""\t""" ) print() def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Optional[Any] ) -> int: '''simple docstring''' __UpperCAmelCase : Any = [[float("""inf""" ) for _ in range(_UpperCamelCase )] for _ in range(_UpperCamelCase )] for i in range(_UpperCamelCase ): for j in range(_UpperCamelCase ): __UpperCAmelCase : Optional[Any] = graph[i][j] # check vertex k against all other vertices (i, j) for k in range(_UpperCamelCase ): # looping through rows of graph array for i in range(_UpperCamelCase ): # looping through columns of graph array for j in range(_UpperCamelCase ): if ( dist[i][k] != float("""inf""" ) and dist[k][j] != float("""inf""" ) and dist[i][k] + dist[k][j] < dist[i][j] ): __UpperCAmelCase : Optional[int] = dist[i][k] + dist[k][j] _print_dist(_UpperCamelCase , _UpperCamelCase ) return dist, v if __name__ == "__main__": UpperCAmelCase : str = int(input('Enter number of vertices: ')) UpperCAmelCase : List[str] = int(input('Enter number of edges: ')) UpperCAmelCase : Optional[int] = [[float('inf') for i in range(v)] for j in range(v)] for i in range(v): UpperCAmelCase : Optional[int] = 0.0 # src and dst are indices that must be within the array size graph[e][v] # failure to follow this will result in an error for i in range(e): print('\nEdge ', i + 1) UpperCAmelCase : Dict = int(input('Enter source:')) UpperCAmelCase : List[Any] = int(input('Enter destination:')) UpperCAmelCase : int = float(input('Enter weight:')) UpperCAmelCase : Optional[int] = weight floyd_warshall(graph, v) # Example Input # Enter number of vertices: 3 # Enter number of edges: 2 # # generated graph from vertex and edge inputs # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]] # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]] # specify source, destination and weight for edge #1 # Edge 1 # Enter source:1 # Enter destination:2 # Enter weight:2 # specify source, destination and weight for edge #2 # Edge 2 # Enter source:2 # Enter destination:1 # Enter weight:1 # # Expected Output from the vertice, edge and src, dst, weight inputs!! # 0 INF INF # INF 0 2 # INF 1 0
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
"""simple docstring""" UpperCAmelCase : List[Any] = frozenset( [ 'prompt', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) UpperCAmelCase : Optional[int] = frozenset(['prompt', 'negative_prompt']) UpperCAmelCase : Tuple = frozenset([]) UpperCAmelCase : Tuple = frozenset(['image']) UpperCAmelCase : Union[str, Any] = frozenset( [ 'image', 'height', 'width', 'guidance_scale', ] ) UpperCAmelCase : Optional[Any] = frozenset(['image']) UpperCAmelCase : Optional[Any] = frozenset( [ 'prompt', 'image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) UpperCAmelCase : Dict = frozenset(['prompt', 'image', 'negative_prompt']) UpperCAmelCase : int = frozenset( [ # Text guided image variation with an image mask 'prompt', 'image', 'mask_image', 'height', 'width', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', ] ) UpperCAmelCase : Optional[int] = frozenset(['prompt', 'image', 'mask_image', 'negative_prompt']) UpperCAmelCase : List[str] = frozenset( [ # image variation with an image mask 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) UpperCAmelCase : str = frozenset(['image', 'mask_image']) UpperCAmelCase : Union[str, Any] = frozenset( [ 'example_image', 'image', 'mask_image', 'height', 'width', 'guidance_scale', ] ) UpperCAmelCase : Dict = frozenset(['example_image', 'image', 'mask_image']) UpperCAmelCase : Any = frozenset(['class_labels']) UpperCAmelCase : List[str] = frozenset(['class_labels']) UpperCAmelCase : Optional[Any] = frozenset(['batch_size']) UpperCAmelCase : List[Any] = frozenset([]) UpperCAmelCase : str = frozenset(['batch_size']) UpperCAmelCase : List[str] = frozenset([]) UpperCAmelCase : Optional[Any] = frozenset( [ 'prompt', 'audio_length_in_s', 'guidance_scale', 'negative_prompt', 'prompt_embeds', 'negative_prompt_embeds', 'cross_attention_kwargs', ] ) UpperCAmelCase : Tuple = frozenset(['prompt', 'negative_prompt']) UpperCAmelCase : Any = frozenset(['input_tokens']) UpperCAmelCase : Optional[Any] = frozenset(['input_tokens'])
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
"""simple docstring""" import argparse import torch from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Dict ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[str] = MobileBertConfig.from_json_file(_UpperCamelCase ) print(f'''Building PyTorch model from configuration: {config}''' ) __UpperCAmelCase : List[Any] = MobileBertForPreTraining(_UpperCamelCase ) # Load weights from tf checkpoint __UpperCAmelCase : List[Any] = load_tf_weights_in_mobilebert(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save pytorch-model print(f'''Save PyTorch model to {pytorch_dump_path}''' ) torch.save(model.state_dict() , _UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--mobilebert_config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained MobileBERT model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase : Any = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
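# A minimal, runnable sketch of the tester pattern above: build a FunnelConfig
# with the same tiny sizes the tester uses (batch 13, seq 7, vocab 99), run a
# batch of random ids through TFFunnelModel, and check the output shape.
import tensorflow as tf
from transformers import FunnelConfig, TFFunnelModel

config = FunnelConfig(
    vocab_size=99, block_sizes=[1, 1, 2], num_decoder_layers=1,
    d_model=32, n_head=4, d_head=8, d_inner=37,
)
model = TFFunnelModel(config=config)
input_ids = tf.random.uniform((13, 7), minval=0, maxval=99, dtype=tf.int32)
outputs = model(input_ids)
assert outputs.last_hidden_state.shape == (13, 7, 32)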
"""simple docstring""" import warnings from ...utils import logging from .image_processing_dpt import DPTImageProcessor UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : List[Any] , *UpperCamelCase : int , **UpperCamelCase : Optional[Any] ): '''simple docstring''' warnings.warn( """The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use DPTImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
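# The file above is an instance of a common deprecation shim: the old class
# subclasses its replacement and emits a FutureWarning on construction. A
# library-agnostic sketch of the same pattern (class names are illustrative):
import warnings

class NewImageProcessor:
    def __init__(self, size=224):
        self.size = size

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated; use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)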
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
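# The two functions above implement double-and-add ("Russian peasant")
# multiplication, plain and modulo c. The renaming style of this dump drops
# the accumulator binding, so here is a clean, runnable equivalent (the
# function and variable names are my own):
def binary_multiply(a, b):
    res = 0
    while b > 0:
        if b & 1:        # low bit set: add the current multiple of a
            res += a
        a += a           # double a
        b >>= 1          # halve b
    return res

def binary_mod_multiply(a, b, c):
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res

assert binary_multiply(7, 9) == 63
assert binary_mod_multiply(7, 9, 10) == 3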
"""simple docstring""" import json import os import unittest from transformers import MgpstrTokenizer from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = MgpstrTokenizer __a = False __a = {} __a = False def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' super().setUp() # fmt: off __UpperCAmelCase : List[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""] # fmt: on __UpperCAmelCase : Optional[Any] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) def lowerCamelCase__ ( self : Dict , **UpperCamelCase : Any ): '''simple docstring''' return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Any = """tester""" __UpperCAmelCase : Dict = """tester""" return input_text, output_text @unittest.skip("""MGP-STR always lower cases letters.""" ) def lowerCamelCase__ ( self : int ): '''simple docstring''' pass def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.get_tokenizers(do_lower_case=UpperCamelCase ) for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase : Any = """[SPECIAL_TOKEN]""" tokenizer.add_special_tokens({"""cls_token""": special_token} ) __UpperCAmelCase : Optional[Any] = tokenizer.encode([special_token] , add_special_tokens=UpperCamelCase ) self.assertEqual(len(UpperCamelCase ) , 1 ) __UpperCAmelCase : List[str] = tokenizer.decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) self.assertTrue(special_token not in decoded ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f'''{tokenizer.__class__.__name__}''' ): __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.get_input_output_texts(UpperCamelCase ) __UpperCAmelCase : str = tokenizer.tokenize(UpperCamelCase ) __UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase ) __UpperCAmelCase : Dict = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Tuple = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertNotEqual(len(UpperCamelCase ) , 0 ) __UpperCAmelCase : str = tokenizer.decode(UpperCamelCase ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(text_a.replace(""" """ , """""" ) , UpperCamelCase ) @unittest.skip("""MGP-STR tokenizer only handles one sequence.""" ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' pass @unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" ) def lowerCamelCase__ ( self : List[Any] ): '''simple 
docstring''' pass
320
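# The setUp above writes a character-level vocab to vocab.json and reloads it
# via MgpstrTokenizer.from_pretrained. A dependency-free sketch of that round
# trip (the file name and the encoding step are illustrative):
import json
import os
import tempfile

vocab = {ch: i for i, ch in enumerate(["[GO]", "[s]"] + list("0123456789abcdefghijklmnopqrstuvwxyz"))}
tmpdir = tempfile.mkdtemp()
path = os.path.join(tmpdir, "vocab.json")
with open(path, "w", encoding="utf-8") as fp:
    fp.write(json.dumps(vocab) + "\n")
with open(path, encoding="utf-8") as fp:
    reloaded = json.load(fp)
assert reloaded == vocab
ids = [reloaded[ch] for ch in "tester"]  # character-level encoding of "tester"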
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
1
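# The __call__ above dispatches on which of text/images is provided and merges
# pixel values into the text encoding when both are present. A minimal sketch
# of that control flow with stand-in processors (everything below is
# illustrative, not the real tokenizer or image processor):
def process(text=None, images=None):
    if text is None and images is None:
        raise ValueError("You have to specify either text or images. Both cannot be none.")
    encoding = {}
    if text is not None:
        encoding["input_ids"] = [ord(c) for c in text]  # stand-in tokenizer
    if images is not None:
        pixel_values = [[0.0] * 4]                      # stand-in image features
        if text is not None:
            encoding["pixel_values"] = pixel_values     # merge into text encoding
        else:
            return {"pixel_values": pixel_values}
    return encoding

assert set(process(text="hi", images=object())) == {"input_ids", "pixel_values"}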
"""simple docstring""" from math import sqrt def lowerCamelCase ( _UpperCamelCase : int ) -> bool: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and ( number >= 0 ), "'number' must been an int and positive" __UpperCAmelCase : Dict = True # 0 and 1 are none primes. if number <= 1: __UpperCAmelCase : Dict = False for divisor in range(2 , int(round(sqrt(_UpperCamelCase ) ) ) + 1 ): # if 'number' divisible by 'divisor' then sets 'status' # of false and break up the loop. if number % divisor == 0: __UpperCAmelCase : Optional[int] = False break # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'status' must been from type bool" return status def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n > 2), "'N' must been an int and > 2" # beginList: contains all natural numbers from 2 up to N __UpperCAmelCase : Union[str, Any] = list(range(2 , n + 1 ) ) __UpperCAmelCase : List[str] = [] # this list will be returns. # actual sieve of erathostenes for i in range(len(_UpperCamelCase ) ): for j in range(i + 1 , len(_UpperCamelCase ) ): if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0): __UpperCAmelCase : Union[str, Any] = 0 # filters actual prime numbers. __UpperCAmelCase : Any = [x for x in begin_list if x != 0] # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list" return ans def lowerCamelCase ( _UpperCamelCase : int ) -> str: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n > 2), "'N' must been an int and > 2" __UpperCAmelCase : List[str] = [] # iterates over all numbers between 2 up to N+1 # if a number is prime then appends to list 'ans' for number in range(2 , n + 1 ): if is_prime(_UpperCamelCase ): ans.append(_UpperCamelCase ) # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list" return ans def lowerCamelCase ( _UpperCamelCase : str ) -> Optional[Any]: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and number >= 0, "'number' must been an int and >= 0" __UpperCAmelCase : str = [] # this list will be returns of the function. # potential prime number factors. 
__UpperCAmelCase : Optional[Any] = 2 __UpperCAmelCase : List[str] = number if number == 0 or number == 1: ans.append(_UpperCamelCase ) # if 'number' not prime then builds the prime factorization of 'number' elif not is_prime(_UpperCamelCase ): while quotient != 1: if is_prime(_UpperCamelCase ) and (quotient % factor == 0): ans.append(_UpperCamelCase ) quotient /= factor else: factor += 1 else: ans.append(_UpperCamelCase ) # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type list" return ans def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Union[str, Any]: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" __UpperCAmelCase : Optional[Any] = 0 # prime factorization of 'number' __UpperCAmelCase : Tuple = prime_factorization(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = max(_UpperCamelCase ) # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type int" return ans def lowerCamelCase ( _UpperCamelCase : Tuple ) -> Union[str, Any]: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and ( number >= 0 ), "'number' bust been an int and >= 0" __UpperCAmelCase : Optional[int] = 0 # prime factorization of 'number' __UpperCAmelCase : Tuple = prime_factorization(_UpperCamelCase ) __UpperCAmelCase : Tuple = min(_UpperCamelCase ) # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'ans' must been from type int" return ans def lowerCamelCase ( _UpperCamelCase : List[str] ) -> int: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'number' must been an int" assert isinstance(number % 2 == 0 , _UpperCamelCase ), "compare bust been from type bool" return number % 2 == 0 def lowerCamelCase ( _UpperCamelCase : Tuple ) -> List[str]: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ), "'number' must been an int" assert isinstance(number % 2 != 0 , _UpperCamelCase ), "compare bust been from type bool" return number % 2 != 0 def lowerCamelCase ( _UpperCamelCase : Any ) -> Union[str, Any]: '''simple docstring''' assert ( isinstance(_UpperCamelCase , _UpperCamelCase ) and (number > 2) and is_even(_UpperCamelCase ) ), "'number' must been an int, even and > 2" __UpperCAmelCase : str = [] # this list will returned # creates a list of prime numbers between 2 up to 'number' __UpperCAmelCase : Tuple = get_prime_numbers(_UpperCamelCase ) __UpperCAmelCase : Tuple = len(_UpperCamelCase ) # run variable for while-loops. __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : Tuple = None # exit variable. for break up the loops __UpperCAmelCase : int = True while i < len_pn and loop: __UpperCAmelCase : List[str] = i + 1 while j < len_pn and loop: if prime_numbers[i] + prime_numbers[j] == number: __UpperCAmelCase : List[Any] = False ans.append(prime_numbers[i] ) ans.append(prime_numbers[j] ) j += 1 i += 1 # precondition assert ( isinstance(_UpperCamelCase , _UpperCamelCase ) and (len(_UpperCamelCase ) == 2) and (ans[0] + ans[1] == number) and is_prime(ans[0] ) and is_prime(ans[1] ) ), "'ans' must contains two primes. 
And sum of elements must been eq 'number'" return ans def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Union[str, Any]: '''simple docstring''' assert ( isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(_UpperCamelCase , _UpperCamelCase ) and (numbera >= 0) and (numbera >= 0) ), "'number1' and 'number2' must been positive integer." __UpperCAmelCase : Optional[Any] = 0 while numbera != 0: __UpperCAmelCase : Union[str, Any] = numbera % numbera __UpperCAmelCase : Any = numbera __UpperCAmelCase : Tuple = rest # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ) and ( numbera >= 0 ), "'number' must been from type int and positive" return numbera def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : Dict ) -> Optional[Any]: '''simple docstring''' assert ( isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(_UpperCamelCase , _UpperCamelCase ) and (numbera >= 1) and (numbera >= 1) ), "'number1' and 'number2' must been positive integer." __UpperCAmelCase : Optional[Any] = 1 # actual answer that will be return. # for kgV (x,1) if numbera > 1 and numbera > 1: # builds the prime factorization of 'number1' and 'number2' __UpperCAmelCase : Dict = prime_factorization(_UpperCamelCase ) __UpperCAmelCase : List[Any] = prime_factorization(_UpperCamelCase ) elif numbera == 1 or numbera == 1: __UpperCAmelCase : str = [] __UpperCAmelCase : Any = [] __UpperCAmelCase : Tuple = max(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Any = 0 __UpperCAmelCase : Tuple = 0 __UpperCAmelCase : List[Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2' # iterates through primeFac1 for n in prime_fac_a: if n not in done: if n in prime_fac_a: __UpperCAmelCase : Union[str, Any] = prime_fac_a.count(_UpperCamelCase ) __UpperCAmelCase : List[str] = prime_fac_a.count(_UpperCamelCase ) for _ in range(max(_UpperCamelCase , _UpperCamelCase ) ): ans *= n else: __UpperCAmelCase : List[str] = prime_fac_a.count(_UpperCamelCase ) for _ in range(_UpperCamelCase ): ans *= n done.append(_UpperCamelCase ) # iterates through primeFac2 for n in prime_fac_a: if n not in done: __UpperCAmelCase : Tuple = prime_fac_a.count(_UpperCamelCase ) for _ in range(_UpperCamelCase ): ans *= n done.append(_UpperCamelCase ) # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ) and ( ans >= 0 ), "'ans' must been from type int and positive" return ans def lowerCamelCase ( _UpperCamelCase : Any ) -> Tuple: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'number' must been a positive int" __UpperCAmelCase : List[str] = 0 __UpperCAmelCase : Optional[int] = 2 # this variable holds the answer while index < n: index += 1 ans += 1 # counts to the next number # if ans not prime then # runs to the next prime number. while not is_prime(_UpperCamelCase ): ans += 1 # precondition assert isinstance(_UpperCamelCase , _UpperCamelCase ) and is_prime( _UpperCamelCase ), "'ans' must been a prime number and from type int" return ans def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' assert ( is_prime(_UpperCamelCase ) and is_prime(_UpperCamelCase ) and (p_number_a < p_number_a) ), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'" __UpperCAmelCase : int = p_number_a + 1 # jump to the next number __UpperCAmelCase : Any = [] # this list will be returns. # if number is not prime then # fetch the next prime number. 
while not is_prime(_UpperCamelCase ): number += 1 while number < p_number_a: ans.append(_UpperCamelCase ) number += 1 # fetch the next prime number. while not is_prime(_UpperCamelCase ): number += 1 # precondition assert ( isinstance(_UpperCamelCase , _UpperCamelCase ) and ans[0] != p_number_a and ans[len(_UpperCamelCase ) - 1] != p_number_a ), "'ans' must been a list without the arguments" # 'ans' contains not 'pNumber1' and 'pNumber2' ! return ans def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Tuple: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 1), "'n' must been int and >= 1" __UpperCAmelCase : List[str] = [] # will be returned. for divisor in range(1 , n + 1 ): if n % divisor == 0: ans.append(_UpperCamelCase ) # precondition assert ans[0] == 1 and ans[len(_UpperCamelCase ) - 1] == n, "Error in function getDivisiors(...)" return ans def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> int: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and ( number > 1 ), "'number' must been an int and >= 1" __UpperCAmelCase : List[str] = get_divisors(_UpperCamelCase ) # precondition assert ( isinstance(_UpperCamelCase , _UpperCamelCase ) and (divisors[0] == 1) and (divisors[len(_UpperCamelCase ) - 1] == number) ), "Error in help-function getDivisiors(...)" # summed all divisors up to 'number' (exclusive), hence [:-1] return sum(divisors[:-1] ) == number def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : int ) -> str: '''simple docstring''' assert ( isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(_UpperCamelCase , _UpperCamelCase ) and (denominator != 0) ), "The arguments must been from type int and 'denominator' != 0" # build the greatest common divisor of numerator and denominator. __UpperCAmelCase : Any = gcd(abs(_UpperCamelCase ) , abs(_UpperCamelCase ) ) # precondition assert ( isinstance(_UpperCamelCase , _UpperCamelCase ) and (numerator % gcd_of_fraction == 0) and (denominator % gcd_of_fraction == 0) ), "Error in function gcd(...,...)" return (numerator // gcd_of_fraction, denominator // gcd_of_fraction) def lowerCamelCase ( _UpperCamelCase : Dict ) -> Any: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'n' must been a int and >= 0" __UpperCAmelCase : Union[str, Any] = 1 # this will be return. for factor in range(1 , n + 1 ): ans *= factor return ans def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Union[str, Any]: '''simple docstring''' assert isinstance(_UpperCamelCase , _UpperCamelCase ) and (n >= 0), "'n' must been an int and >= 0" __UpperCAmelCase : int = 0 __UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : List[Any] = 1 # this will be return for _ in range(n - 1 ): __UpperCAmelCase : Any = ans ans += fiba __UpperCAmelCase : List[Any] = tmp return ans
320
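# Clean, runnable versions of two routines from the module above: the sieve of
# Eratosthenes and trial-division prime factorization. Names are my own, and
# the factorization uses integer division (//=) where the original's float
# division (/=) is a latent bug:
def sieve_er(n):
    nums = list(range(2, n + 1))
    for i in range(len(nums)):
        for j in range(i + 1, len(nums)):
            if nums[i] != 0 and nums[j] % nums[i] == 0:
                nums[j] = 0  # cross out multiples
    return [x for x in nums if x != 0]

def prime_factorization(number):
    if number in (0, 1):
        return [number]
    ans, factor, quotient = [], 2, number
    while quotient != 1:
        if quotient % factor == 0:  # the smallest divisor found is always prime
            ans.append(factor)
            quotient //= factor
        else:
            factor += 1
    return ans

assert sieve_er(20) == [2, 3, 5, 7, 11, 13, 17, 19]
assert prime_factorization(60) == [2, 2, 3, 5]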
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
1
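# A quick worked example of the median routine above: merging [1, 3] and [2]
# gives [1, 2, 3] (odd count), so the median is the middle element 2; merging
# [1, 3] and [2, 4] gives [1, 2, 3, 4], so the median is (2 + 3) / 2 = 2.5.
def median_of_two_arrays(nums_a, nums_b):
    all_numbers = sorted(nums_a + nums_b)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    return (all_numbers[div] + all_numbers[div - 1]) / 2

assert median_of_two_arrays([1, 3], [2]) == 2
assert median_of_two_arrays([1, 3], [2, 4]) == 2.5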
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
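# The import structure above defers heavy submodule imports until an attribute
# is first accessed. A toy sketch of the same idea via module-level
# __getattr__ (PEP 562); this illustrates the pattern only and is not
# transformers' actual _LazyModule implementation:
import importlib

_import_structure = {"json": ["dumps", "loads"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        module = importlib.import_module(_attr_to_module[name])  # import on first use
        return getattr(module, name)
    raise AttributeError(name)

assert __getattr__("dumps")({"a": 1}) == '{"a": 1}'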
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
1
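# The test above exercises optimum's BetterTransformer round trip: convert for
# fast inference, then revert before saving so the checkpoint stays in the
# canonical format. Condensed usage mirroring the test's calls (the
# digit-mangled names in this dump correspond to AutoModelForSeq2SeqLM;
# running this needs the `optimum` package and access to the tiny model):
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

name = "hf-internal-testing/tiny-random-t5"
tokenizer = AutoTokenizer.from_pretrained(name)
model = AutoModelForSeq2SeqLM.from_pretrained(name)
inputs = tokenizer("This is me", return_tensors="pt")
model = model.to_bettertransformer()        # swap in BetterTransformer modules
output = model.generate(**inputs)
model = model.reverse_bettertransformer()   # revert before saving
model.save_pretrained("/tmp/tiny-t5-checkpoint")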
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from torch import nn from torch.nn import CrossEntropyLoss from ... import AutoBackbone from ...modeling_outputs import SemanticSegmenterOutput from ...modeling_utils import PreTrainedModel from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings from ...utils.backbone_utils import BackboneMixin from .configuration_upernet import UperNetConfig UpperCAmelCase : Optional[Any] = [ 'openmmlab/upernet-convnext-tiny', # See all UperNet models at https://huggingface.co/models?filter=upernet ] # General docstring UpperCAmelCase : Any = 'UperNetConfig' class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Union[int, Tuple[int, int]] , UpperCamelCase : Union[int, Tuple[int, int], str] = 0 , UpperCamelCase : bool = False , UpperCamelCase : Union[int, Tuple[int, int]] = 1 , ): '''simple docstring''' super().__init__() __UpperCAmelCase : List[str] = nn.Convad( in_channels=UpperCamelCase , out_channels=UpperCamelCase , kernel_size=UpperCamelCase , padding=UpperCamelCase , bias=UpperCamelCase , dilation=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = nn.BatchNormad(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = nn.ReLU() def lowerCamelCase__ ( self : List[str] , UpperCamelCase : torch.Tensor ): '''simple docstring''' __UpperCAmelCase : int = self.conv(UpperCamelCase ) __UpperCAmelCase : str = self.batch_norm(UpperCamelCase ) __UpperCAmelCase : Dict = self.activation(UpperCamelCase ) return output class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' super().__init__() __UpperCAmelCase : int = [ nn.AdaptiveAvgPoolad(UpperCamelCase ), UperNetConvModule(UpperCamelCase , UpperCamelCase , kernel_size=1 ), ] for i, layer in enumerate(self.layers ): self.add_module(str(UpperCamelCase ) , UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : torch.Tensor ): '''simple docstring''' __UpperCAmelCase : Optional[int] = input for layer in self.layers: __UpperCAmelCase : Dict = layer(UpperCamelCase ) return hidden_state class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : List[Any] , UpperCamelCase : Tuple[int, ...] 
, UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : bool ): '''simple docstring''' super().__init__() __UpperCAmelCase : Any = pool_scales __UpperCAmelCase : Any = align_corners __UpperCAmelCase : List[str] = in_channels __UpperCAmelCase : Union[str, Any] = channels __UpperCAmelCase : List[str] = [] for i, pool_scale in enumerate(UpperCamelCase ): __UpperCAmelCase : Optional[Any] = UperNetPyramidPoolingBlock(pool_scale=UpperCamelCase , in_channels=UpperCamelCase , channels=UpperCamelCase ) self.blocks.append(UpperCamelCase ) self.add_module(str(UpperCamelCase ) , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : torch.Tensor ): '''simple docstring''' __UpperCAmelCase : Tuple = [] for ppm in self.blocks: __UpperCAmelCase : Union[str, Any] = ppm(UpperCamelCase ) __UpperCAmelCase : Tuple = nn.functional.interpolate( UpperCamelCase , size=x.size()[2:] , mode="""bilinear""" , align_corners=self.align_corners ) ppm_outs.append(UpperCamelCase ) return ppm_outs class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Tuple , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = config __UpperCAmelCase : Tuple = config.pool_scales # e.g. (1, 2, 3, 6) __UpperCAmelCase : Optional[Any] = in_channels __UpperCAmelCase : str = config.hidden_size __UpperCAmelCase : int = False __UpperCAmelCase : Tuple = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) # PSP Module __UpperCAmelCase : Union[str, Any] = UperNetPyramidPoolingModule( self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , ) __UpperCAmelCase : Optional[int] = UperNetConvModule( self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) # FPN Module __UpperCAmelCase : Tuple = nn.ModuleList() __UpperCAmelCase : int = nn.ModuleList() for in_channels in self.in_channels[:-1]: # skip the top layer __UpperCAmelCase : str = UperNetConvModule(UpperCamelCase , self.channels , kernel_size=1 ) __UpperCAmelCase : str = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 ) self.lateral_convs.append(UpperCamelCase ) self.fpn_convs.append(UpperCamelCase ) __UpperCAmelCase : int = UperNetConvModule( len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.apply(self._init_weights ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List[str] ): '''simple docstring''' if isinstance(UpperCamelCase , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = inputs[-1] __UpperCAmelCase : str = [x] psp_outs.extend(self.psp_modules(UpperCamelCase ) ) __UpperCAmelCase : Union[str, Any] = torch.cat(UpperCamelCase , dim=1 ) __UpperCAmelCase : int = self.bottleneck(UpperCamelCase ) return output def lowerCamelCase__ ( self : str , UpperCamelCase : torch.Tensor ): '''simple docstring''' __UpperCAmelCase : Optional[int] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )] laterals.append(self.psp_forward(UpperCamelCase ) ) # build top-down path __UpperCAmelCase : Any = len(UpperCamelCase ) for i in range(used_backbone_levels - 1 , 0 , -1 ): 
__UpperCAmelCase : Any = laterals[i - 1].shape[2:] __UpperCAmelCase : Union[str, Any] = laterals[i - 1] + nn.functional.interpolate( laterals[i] , size=UpperCamelCase , mode="""bilinear""" , align_corners=self.align_corners ) # build outputs __UpperCAmelCase : List[str] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )] # append psp feature fpn_outs.append(laterals[-1] ) for i in range(used_backbone_levels - 1 , 0 , -1 ): __UpperCAmelCase : List[str] = nn.functional.interpolate( fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="""bilinear""" , align_corners=self.align_corners ) __UpperCAmelCase : int = torch.cat(UpperCamelCase , dim=1 ) __UpperCAmelCase : Optional[Any] = self.fpn_bottleneck(UpperCamelCase ) __UpperCAmelCase : int = self.classifier(UpperCamelCase ) return output class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Dict , UpperCamelCase : List[str] , UpperCamelCase : int = 2 , UpperCamelCase : int = 3 , UpperCamelCase : Union[int, Tuple[int, int]] = 1 ): '''simple docstring''' super().__init__() __UpperCAmelCase : Dict = config __UpperCAmelCase : Union[str, Any] = config.auxiliary_in_channels __UpperCAmelCase : List[Any] = config.auxiliary_channels __UpperCAmelCase : int = config.auxiliary_num_convs __UpperCAmelCase : Any = config.auxiliary_concat_input __UpperCAmelCase : Union[str, Any] = in_index __UpperCAmelCase : Tuple = (kernel_size // 2) * dilation __UpperCAmelCase : Tuple = [] convs.append( UperNetConvModule( self.in_channels , self.channels , kernel_size=UpperCamelCase , padding=UpperCamelCase , dilation=UpperCamelCase ) ) for i in range(self.num_convs - 1 ): convs.append( UperNetConvModule( self.channels , self.channels , kernel_size=UpperCamelCase , padding=UpperCamelCase , dilation=UpperCamelCase ) ) if self.num_convs == 0: __UpperCAmelCase : Dict = nn.Identity() else: __UpperCAmelCase : Dict = nn.Sequential(*UpperCamelCase ) if self.concat_input: __UpperCAmelCase : Dict = UperNetConvModule( self.in_channels + self.channels , self.channels , kernel_size=UpperCamelCase , padding=kernel_size // 2 ) __UpperCAmelCase : List[str] = nn.Convad(self.channels , config.num_labels , kernel_size=1 ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' self.apply(self._init_weights ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] ): '''simple docstring''' if isinstance(UpperCamelCase , nn.Convad ): module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range ) if module.bias is not None: module.bias.data.zero_() def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : torch.Tensor ): '''simple docstring''' __UpperCAmelCase : int = encoder_hidden_states[self.in_index] __UpperCAmelCase : int = self.convs(UpperCamelCase ) if self.concat_input: __UpperCAmelCase : List[str] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) ) __UpperCAmelCase : List[Any] = self.classifier(UpperCamelCase ) return output class lowerCamelCase__ ( A ): """simple docstring""" __a = UperNetConfig __a = """pixel_values""" __a = True def lowerCamelCase__ ( self : Any , UpperCamelCase : int ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): module.backbone.init_weights() module.decode_head.init_weights() module.auxiliary_head.init_weights() def lowerCamelCase__ ( self : int ): '''simple docstring''' self.backbone.init_weights() self.decode_head.init_weights() self.auxiliary_head.init_weights() def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase 
: Tuple=False ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Optional[Any] = value UpperCAmelCase : Any = R'\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n' UpperCAmelCase : Union[str, Any] = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n' @add_start_docstrings( """UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.""" , A , ) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Union[str, Any] , UpperCamelCase : int ): '''simple docstring''' super().__init__(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = AutoBackbone.from_config(config.backbone_config ) # Semantic segmentation head(s) __UpperCAmelCase : Optional[int] = UperNetHead(UpperCamelCase , in_channels=self.backbone.channels ) __UpperCAmelCase : Optional[int] = UperNetFCNHead(UpperCamelCase ) if config.use_auxiliary_head else None # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("""batch_size, sequence_length""" ) ) @replace_return_docstrings(output_type=UpperCamelCase , config_class=_CONFIG_FOR_DOC ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Optional[torch.Tensor] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[bool] = None , UpperCamelCase : Optional[torch.Tensor] = None , UpperCamelCase : Optional[bool] = None , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict __UpperCAmelCase : Any = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCAmelCase : Optional[Any] = output_attentions if output_attentions is not None else self.config.output_attentions __UpperCAmelCase : Any = self.backbone.forward_with_filtered_kwargs( UpperCamelCase , output_hidden_states=UpperCamelCase , output_attentions=UpperCamelCase ) __UpperCAmelCase : List[str] = outputs.feature_maps __UpperCAmelCase : Union[str, Any] = self.decode_head(UpperCamelCase ) __UpperCAmelCase : Tuple = nn.functional.interpolate(UpperCamelCase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=UpperCamelCase ) __UpperCAmelCase : List[Any] = None if 
self.auxiliary_head is not None: __UpperCAmelCase : Optional[Any] = self.auxiliary_head(UpperCamelCase ) __UpperCAmelCase : Dict = nn.functional.interpolate( UpperCamelCase , size=pixel_values.shape[2:] , mode="""bilinear""" , align_corners=UpperCamelCase ) __UpperCAmelCase : Tuple = None if labels is not None: if self.config.num_labels == 1: raise ValueError("""The number of labels should be greater than one""" ) else: # compute weighted loss __UpperCAmelCase : str = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index ) __UpperCAmelCase : int = loss_fct(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : int = loss_fct(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[int] = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss if not return_dict: if output_hidden_states: __UpperCAmelCase : Dict = (logits,) + outputs[1:] else: __UpperCAmelCase : int = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SemanticSegmenterOutput( loss=UpperCamelCase , logits=UpperCamelCase , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
320
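# The decode head above builds its FPN top-down path by bilinearly upsampling
# each coarser lateral onto the next finer one and summing. That core loop,
# isolated with illustrative shapes:
import torch
import torch.nn.functional as F

laterals = [torch.randn(1, 8, 32, 32), torch.randn(1, 8, 16, 16), torch.randn(1, 8, 8, 8)]
for i in range(len(laterals) - 1, 0, -1):
    prev_shape = laterals[i - 1].shape[2:]
    laterals[i - 1] = laterals[i - 1] + F.interpolate(
        laterals[i], size=prev_shape, mode="bilinear", align_corners=False
    )
assert laterals[0].shape == (1, 8, 32, 32)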
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : str = logging.get_logger(__name__) UpperCAmelCase : Optional[Any] = { 'microsoft/swinv2-tiny-patch4-window8-256': ( 'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json' ), } class lowerCamelCase__ ( A ): """simple docstring""" __a = """swinv2""" __a = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[Any] , UpperCamelCase : Optional[Any]=224 , UpperCamelCase : Dict=4 , UpperCamelCase : Tuple=3 , UpperCamelCase : Tuple=96 , UpperCamelCase : int=[2, 2, 6, 2] , UpperCamelCase : Union[str, Any]=[3, 6, 12, 24] , UpperCamelCase : Any=7 , UpperCamelCase : Union[str, Any]=4.0 , UpperCamelCase : Tuple=True , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Any=0.0 , UpperCamelCase : Dict=0.1 , UpperCamelCase : Dict="gelu" , UpperCamelCase : Optional[int]=False , UpperCamelCase : int=0.02 , UpperCamelCase : str=1e-5 , UpperCamelCase : Tuple=32 , **UpperCamelCase : List[str] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Optional[Any] = image_size __UpperCAmelCase : str = patch_size __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : Any = embed_dim __UpperCAmelCase : Tuple = depths __UpperCAmelCase : str = len(UpperCamelCase ) __UpperCAmelCase : int = num_heads __UpperCAmelCase : List[str] = window_size __UpperCAmelCase : List[str] = mlp_ratio __UpperCAmelCase : Dict = qkv_bias __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : int = attention_probs_dropout_prob __UpperCAmelCase : Any = drop_path_rate __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Dict = use_absolute_embeddings __UpperCAmelCase : Any = layer_norm_eps __UpperCAmelCase : List[str] = initializer_range __UpperCAmelCase : Any = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase : int = int(embed_dim * 2 ** (len(UpperCamelCase ) - 1) ) __UpperCAmelCase : int = (0, 0, 0, 0)
320
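# The last two assignments in the config above derive the channel dimension
# after the final stage: each of the len(depths) - 1 patch-merging steps
# doubles embed_dim. With the defaults shown (embed_dim=96, depths=[2, 2, 6, 2]):
embed_dim = 96
depths = [2, 2, 6, 2]
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
assert hidden_size == 768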
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" # coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import sys import transformers UpperCAmelCase : str = '3' print('Python version:', sys.version) print('transformers version:', transformers.__version__) try: import torch print('Torch version:', torch.__version__) print('Cuda available:', torch.cuda.is_available()) print('Cuda version:', torch.version.cuda) print('CuDNN version:', torch.backends.cudnn.version()) print('Number of GPUs available:', torch.cuda.device_count()) print('NCCL version:', torch.cuda.nccl.version()) except ImportError: print('Torch version:', None) try: import deepspeed print('DeepSpeed version:', deepspeed.__version__) except ImportError: print('DeepSpeed version:', None) try: import tensorflow as tf print('TensorFlow version:', tf.__version__) print('TF GPUs available:', bool(tf.config.list_physical_devices('GPU'))) print('Number of TF GPUs available:', len(tf.config.list_physical_devices('GPU'))) except ImportError: print('TensorFlow version:', None)
320
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
1
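# Project Euler 40: the solution above concatenates "123456789101112..."
# (Champernowne's constant) and multiplies the digits at 1-indexed positions
# 1, 10, 100, ..., 1_000_000. A small-scale sanity check of the indexing:
digits = "".join(str(i) for i in range(1, 200))
assert digits.startswith("123456789101112")
assert int(digits[0]) == 1   # d(1)
assert int(digits[9]) == 1   # d(10), the leading digit of 10
assert int(digits[99]) == 5  # d(100), the leading digit of 55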
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Dict = logging.get_logger(__name__) UpperCAmelCase : Optional[int] = { 'edbeeching/decision-transformer-gym-hopper-medium': ( 'https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json' ), # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer } class lowerCamelCase__ ( A ): """simple docstring""" __a = """decision_transformer""" __a = ["""past_key_values"""] __a = { """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , UpperCamelCase : str=17 , UpperCamelCase : List[Any]=4 , UpperCamelCase : str=128 , UpperCamelCase : Optional[int]=4_096 , UpperCamelCase : List[str]=True , UpperCamelCase : List[str]=1 , UpperCamelCase : Union[str, Any]=1_024 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : List[str]=1 , UpperCamelCase : List[Any]=None , UpperCamelCase : Any="relu" , UpperCamelCase : Dict=0.1 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Union[str, Any]=1e-5 , UpperCamelCase : str=0.02 , UpperCamelCase : Tuple=True , UpperCamelCase : Any=True , UpperCamelCase : List[Any]=50_256 , UpperCamelCase : int=50_256 , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Union[str, Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : List[str] = state_dim __UpperCAmelCase : List[Any] = act_dim __UpperCAmelCase : Any = hidden_size __UpperCAmelCase : int = max_ep_len __UpperCAmelCase : Union[str, Any] = action_tanh __UpperCAmelCase : str = vocab_size __UpperCAmelCase : Optional[Any] = n_positions __UpperCAmelCase : Optional[Any] = n_layer __UpperCAmelCase : Optional[Any] = n_head __UpperCAmelCase : Optional[int] = n_inner __UpperCAmelCase : Union[str, Any] = activation_function __UpperCAmelCase : Optional[int] = resid_pdrop __UpperCAmelCase : Optional[int] = embd_pdrop __UpperCAmelCase : Any = attn_pdrop __UpperCAmelCase : Union[str, Any] = layer_norm_epsilon __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : int = scale_attn_weights __UpperCAmelCase : Tuple = use_cache __UpperCAmelCase : Any = scale_attn_by_inverse_layer_idx __UpperCAmelCase : Dict = reorder_and_upcast_attn __UpperCAmelCase : Dict = bos_token_id __UpperCAmelCase : List[Any] = eos_token_id super().__init__(bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , **UpperCamelCase )
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
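# The import structure above defers the heavy framework imports until an attribute is
# first accessed. A minimal self-contained sketch of that pattern follows; it is a
# simplified stand-in written from scratch, not transformers' actual `_LazyModule`.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # maps submodule name -> list of attribute names it provides
        self._import_structure = import_structure
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import the owning submodule lazily, on first attribute access
        if attr in self._attr_to_module:
            module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
            return getattr(module, attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")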
"""simple docstring""" import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets UpperCAmelCase : str = '\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n' UpperCAmelCase : int = '\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n' UpperCAmelCase : Optional[int] = '\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=["About 95 species are currently accepted ."]\n >>> predictions=["About 95 you now get in ."]\n >>> references=[["About 95 species are currently known ."]]\n >>> wiki_split = datasets.load_metric("wiki_split")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {\'sari\': 21.805555555555557, \'sacrebleu\': 14.535768424205482, \'exact\': 0.0}\n' def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> int: '''simple docstring''' def remove_articles(_UpperCamelCase : str ): __UpperCAmelCase : Optional[Any] = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE ) return re.sub(_UpperCamelCase , """ """ , _UpperCamelCase ) def white_space_fix(_UpperCamelCase : Any ): return " ".join(text.split() ) def remove_punc(_UpperCamelCase : Dict ): __UpperCAmelCase : Any = set(string.punctuation ) return "".join(ch for ch in text if ch not in exclude ) def lower(_UpperCamelCase : Dict ): return text.lower() return white_space_fix(remove_articles(remove_punc(lower(_UpperCamelCase ) ) ) ) def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' return int(normalize_answer(_UpperCamelCase ) == normalize_answer(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Dict ) -> Any: '''simple docstring''' __UpperCAmelCase : List[Any] = [any(compute_exact(_UpperCamelCase , _UpperCamelCase ) for ref in refs ) for pred, refs in zip(_UpperCamelCase , _UpperCamelCase )] return (sum(_UpperCamelCase ) / len(_UpperCamelCase )) * 1_0_0 def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Any , _UpperCamelCase : Tuple ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple 
= [rgram for rgrams in rgramslist for rgram in rgrams] __UpperCAmelCase : Dict = Counter(_UpperCamelCase ) __UpperCAmelCase : int = Counter(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = Counter() for sgram, scount in sgramcounter.items(): __UpperCAmelCase : Dict = scount * numref __UpperCAmelCase : Dict = Counter(_UpperCamelCase ) __UpperCAmelCase : Tuple = Counter() for cgram, ccount in cgramcounter.items(): __UpperCAmelCase : Optional[int] = ccount * numref # KEEP __UpperCAmelCase : Union[str, Any] = sgramcounter_rep & cgramcounter_rep __UpperCAmelCase : Union[str, Any] = keepgramcounter_rep & rgramcounter __UpperCAmelCase : List[str] = sgramcounter_rep & rgramcounter __UpperCAmelCase : Dict = 0 __UpperCAmelCase : List[str] = 0 for keepgram in keepgramcountergood_rep: keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram] # Fix an alleged bug [2] in the keep score computation. # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram] keeptmpscorea += keepgramcountergood_rep[keepgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : Any = 1 if len(_UpperCamelCase ) > 0: __UpperCAmelCase : Any = keeptmpscorea / len(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: # Fix an alleged bug [2] in the keep score computation. # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep) __UpperCAmelCase : Optional[int] = keeptmpscorea / sum(keepgramcounterall_rep.values() ) __UpperCAmelCase : int = 0 if keepscore_precision > 0 or keepscore_recall > 0: __UpperCAmelCase : List[str] = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall) # DELETION __UpperCAmelCase : Any = sgramcounter_rep - cgramcounter_rep __UpperCAmelCase : int = delgramcounter_rep - rgramcounter __UpperCAmelCase : Union[str, Any] = sgramcounter_rep - rgramcounter __UpperCAmelCase : Union[str, Any] = 0 __UpperCAmelCase : str = 0 for delgram in delgramcountergood_rep: deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram] deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram] # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. __UpperCAmelCase : List[str] = 1 if len(_UpperCamelCase ) > 0: __UpperCAmelCase : List[Any] = deltmpscorea / len(_UpperCamelCase ) # ADDITION __UpperCAmelCase : List[Any] = set(_UpperCamelCase ) - set(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = set(_UpperCamelCase ) & set(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = set(_UpperCamelCase ) - set(_UpperCamelCase ) __UpperCAmelCase : int = 0 for addgram in addgramcountergood: addtmpscore += 1 # Define 0/0=1 instead of 0 to give higher scores for predictions that match # a target exactly. 
__UpperCAmelCase : Optional[Any] = 1 __UpperCAmelCase : Tuple = 1 if len(_UpperCamelCase ) > 0: __UpperCAmelCase : Any = addtmpscore / len(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: __UpperCAmelCase : Dict = addtmpscore / len(_UpperCamelCase ) __UpperCAmelCase : Dict = 0 if addscore_precision > 0 or addscore_recall > 0: __UpperCAmelCase : Dict = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall) return (keepscore, delscore_precision, addscore) def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Dict ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = len(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = ssent.split(""" """ ) __UpperCAmelCase : Optional[int] = csent.split(""" """ ) __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : str = [] __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : Optional[Any] = [] __UpperCAmelCase : Tuple = [] __UpperCAmelCase : Union[str, Any] = [] for rsent in rsents: __UpperCAmelCase : str = rsent.split(""" """ ) __UpperCAmelCase : str = [] __UpperCAmelCase : Any = [] __UpperCAmelCase : List[str] = [] ragramslist.append(_UpperCamelCase ) for i in range(0 , len(_UpperCamelCase ) - 1 ): if i < len(_UpperCamelCase ) - 1: __UpperCAmelCase : Tuple = ragrams[i] + """ """ + ragrams[i + 1] ragrams.append(_UpperCamelCase ) if i < len(_UpperCamelCase ) - 2: __UpperCAmelCase : Dict = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] ragrams.append(_UpperCamelCase ) if i < len(_UpperCamelCase ) - 3: __UpperCAmelCase : Any = ragrams[i] + """ """ + ragrams[i + 1] + """ """ + ragrams[i + 2] + """ """ + ragrams[i + 3] ragrams.append(_UpperCamelCase ) ragramslist.append(_UpperCamelCase ) ragramslist.append(_UpperCamelCase ) ragramslist.append(_UpperCamelCase ) for i in range(0 , len(_UpperCamelCase ) - 1 ): if i < len(_UpperCamelCase ) - 1: __UpperCAmelCase : List[Any] = sagrams[i] + """ """ + sagrams[i + 1] sagrams.append(_UpperCamelCase ) if i < len(_UpperCamelCase ) - 2: __UpperCAmelCase : Union[str, Any] = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] sagrams.append(_UpperCamelCase ) if i < len(_UpperCamelCase ) - 3: __UpperCAmelCase : Dict = sagrams[i] + """ """ + sagrams[i + 1] + """ """ + sagrams[i + 2] + """ """ + sagrams[i + 3] sagrams.append(_UpperCamelCase ) for i in range(0 , len(_UpperCamelCase ) - 1 ): if i < len(_UpperCamelCase ) - 1: __UpperCAmelCase : Dict = cagrams[i] + """ """ + cagrams[i + 1] cagrams.append(_UpperCamelCase ) if i < len(_UpperCamelCase ) - 2: __UpperCAmelCase : List[str] = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] cagrams.append(_UpperCamelCase ) if i < len(_UpperCamelCase ) - 3: __UpperCAmelCase : List[Any] = cagrams[i] + """ """ + cagrams[i + 1] + """ """ + cagrams[i + 2] + """ """ + cagrams[i + 3] cagrams.append(_UpperCamelCase ) ((__UpperCAmelCase) ,(__UpperCAmelCase) ,(__UpperCAmelCase)) : Tuple = SARIngram(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((__UpperCAmelCase) ,(__UpperCAmelCase) ,(__UpperCAmelCase)) : str = SARIngram(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((__UpperCAmelCase) ,(__UpperCAmelCase) ,(__UpperCAmelCase)) : List[str] = SARIngram(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) ((__UpperCAmelCase) ,(__UpperCAmelCase) 
,(__UpperCAmelCase)) : Dict = SARIngram(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : List[str] = sum([keepascore, keepascore, keepascore, keepascore] ) / 4 __UpperCAmelCase : Optional[Any] = sum([delascore, delascore, delascore, delascore] ) / 4 __UpperCAmelCase : List[Any] = sum([addascore, addascore, addascore, addascore] ) / 4 __UpperCAmelCase : int = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : bool = True , _UpperCamelCase : str = "13a" , _UpperCamelCase : bool = True ) -> Union[str, Any]: '''simple docstring''' if lowercase: __UpperCAmelCase : Dict = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__ ).major >= 2: __UpperCAmelCase : int = sacrebleu.metrics.bleu._get_tokenizer(_UpperCamelCase )()(_UpperCamelCase ) else: __UpperCAmelCase : str = sacrebleu.TOKENIZERS[tokenizer]()(_UpperCamelCase ) elif tokenizer == "moses": __UpperCAmelCase : List[Any] = sacremoses.MosesTokenizer().tokenize(_UpperCamelCase , return_str=_UpperCamelCase , escape=_UpperCamelCase ) elif tokenizer == "penn": __UpperCAmelCase : List[Any] = sacremoses.MosesTokenizer().penn_tokenize(_UpperCamelCase , return_str=_UpperCamelCase ) else: __UpperCAmelCase : str = sentence if not return_str: __UpperCAmelCase : str = normalized_sent.split() return normalized_sent def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : List[str] ) -> List[str]: '''simple docstring''' if not (len(_UpperCamelCase ) == len(_UpperCamelCase ) == len(_UpperCamelCase )): raise ValueError("""Sources length must match predictions and references lengths.""" ) __UpperCAmelCase : int = 0 for src, pred, refs in zip(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ): sari_score += SARIsent(normalize(_UpperCamelCase ) , normalize(_UpperCamelCase ) , [normalize(_UpperCamelCase ) for sent in refs] ) __UpperCAmelCase : Tuple = sari_score / len(_UpperCamelCase ) return 1_0_0 * sari_score def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : Tuple="exp" , _UpperCamelCase : List[str]=None , _UpperCamelCase : List[str]=False , _UpperCamelCase : Any=False , _UpperCamelCase : Tuple=False , ) -> Dict: '''simple docstring''' __UpperCAmelCase : Any = len(references[0] ) if any(len(_UpperCamelCase ) != references_per_prediction for refs in references ): raise ValueError("""Sacrebleu requires the same number of references for each prediction""" ) __UpperCAmelCase : Dict = [[refs[i] for refs in references] for i in range(_UpperCamelCase )] __UpperCAmelCase : int = sacrebleu.corpus_bleu( _UpperCamelCase , _UpperCamelCase , smooth_method=_UpperCamelCase , smooth_value=_UpperCamelCase , force=_UpperCamelCase , lowercase=_UpperCamelCase , use_effective_order=_UpperCamelCase , ) return output.score @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : str ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""" ) , id="""references""" ), } ) , codebase_urls=[ 
"""https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""", """https://github.com/cocoxu/simplification/blob/master/SARI.py""", """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""", """https://github.com/mjpost/sacreBLEU""", ] , reference_urls=[ """https://www.aclweb.org/anthology/Q16-1029.pdf""", """https://github.com/mjpost/sacreBLEU""", """https://en.wikipedia.org/wiki/BLEU""", """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""", ] , ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = {} result.update({"""sari""": compute_sari(sources=UpperCamelCase , predictions=UpperCamelCase , references=UpperCamelCase )} ) result.update({"""sacrebleu""": compute_sacrebleu(predictions=UpperCamelCase , references=UpperCamelCase )} ) result.update({"""exact""": compute_em(predictions=UpperCamelCase , references=UpperCamelCase )} ) return result
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
"""simple docstring""" from __future__ import annotations from fractions import Fraction def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> bool: '''simple docstring''' return ( num != den and num % 1_0 == den // 1_0 and (num // 1_0) / (den % 1_0) == num / den ) def lowerCamelCase ( _UpperCamelCase : int ) -> list[str]: '''simple docstring''' __UpperCAmelCase : str = [] __UpperCAmelCase : Tuple = 1_1 __UpperCAmelCase : Dict = int("""1""" + """0""" * digit_len ) for num in range(_UpperCamelCase , _UpperCamelCase ): while den <= 9_9: if (num != den) and (num % 1_0 == den // 1_0) and (den % 1_0 != 0): if is_digit_cancelling(_UpperCamelCase , _UpperCamelCase ): solutions.append(f'''{num}/{den}''' ) den += 1 num += 1 __UpperCAmelCase : int = 1_0 return solutions def lowerCamelCase ( _UpperCamelCase : int = 2 ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = 1.0 for fraction in fraction_list(_UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = Fraction(_UpperCamelCase ) result *= frac.denominator / frac.numerator return int(_UpperCamelCase ) if __name__ == "__main__": print(solution())
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
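# A hedged sketch of how the dataset and collate function above are typically wired
# together. The names `JsonlDataset` and `collate_fn` follow the un-mangled MMBT
# originals, which is an assumption (the mangled file defines both as `lowerCamelCase`);
# the argument values are illustrative only.
from torch.utils.data import DataLoader

# dataset = JsonlDataset(path, tokenizer, get_image_transforms(), get_mmimdb_labels(), max_seq_length)
# loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate_fn)
# for text, mask, image, img_start, img_end, labels_batch in loader:
#     ...  # feed the batch to an MMBT-style multimodal model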
"""simple docstring""" import tempfile import unittest from transformers import TaConfig, is_torch_available from transformers.testing_utils import ( require_sentencepiece, require_tokenizers, require_torch, slow, torch_device, ) from ...generation.test_utils import GenerationTesterMixin from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : int , UpperCamelCase : Dict=99 , UpperCamelCase : Dict=13 , UpperCamelCase : List[str]=7 , UpperCamelCase : List[str]=9 , UpperCamelCase : Union[str, Any]=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=False , UpperCamelCase : Dict=32 , UpperCamelCase : Dict=5 , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : int=37 , UpperCamelCase : Any=8 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : int=0.002 , UpperCamelCase : List[Any]=1 , UpperCamelCase : List[str]=0 , UpperCamelCase : Optional[Any]=0 , UpperCamelCase : Union[str, Any]=None , UpperCamelCase : Union[str, Any]=None , ): '''simple docstring''' __UpperCAmelCase : Dict = parent __UpperCAmelCase : Union[str, Any] = batch_size __UpperCAmelCase : Tuple = encoder_seq_length __UpperCAmelCase : Dict = decoder_seq_length # For common tests __UpperCAmelCase : Union[str, Any] = self.decoder_seq_length __UpperCAmelCase : Any = is_training __UpperCAmelCase : Optional[int] = use_attention_mask __UpperCAmelCase : str = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Any = hidden_size __UpperCAmelCase : List[str] = num_hidden_layers __UpperCAmelCase : int = num_attention_heads __UpperCAmelCase : Optional[Any] = d_ff __UpperCAmelCase : Tuple = relative_attention_num_buckets __UpperCAmelCase : Optional[Any] = dropout_rate __UpperCAmelCase : Optional[Any] = initializer_factor __UpperCAmelCase : List[Any] = eos_token_id __UpperCAmelCase : Dict = pad_token_id __UpperCAmelCase : str = decoder_start_token_id __UpperCAmelCase : str = None __UpperCAmelCase : List[Any] = decoder_layers def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return TaConfig.from_pretrained("""google/umt5-base""" ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Dict=None , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Any=None , ): '''simple docstring''' if attention_mask is None: __UpperCAmelCase : Tuple = input_ids.ne(config.pad_token_id ) if decoder_attention_mask is None: __UpperCAmelCase : int = decoder_input_ids.ne(config.pad_token_id ) if head_mask is None: __UpperCAmelCase : Union[str, Any] = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=UpperCamelCase ) if decoder_head_mask is None: __UpperCAmelCase : Tuple = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase ) if cross_attn_head_mask is None: __UpperCAmelCase : Optional[Any] = torch.ones( config.num_decoder_layers , config.num_attention_heads , device=UpperCamelCase ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": 
decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) # we need to clamp the input ids here to avoid having pad token in between # this is because for NllbMoe the position_ids are prepared such that # all pad tokens have pos id = 2 and rest are between 2..seq_length # and the seq_length here is seq_length - num_pad_tokens # but when using past, there is no way of knowing if the past input ids had # pad tokens in them, which results in incorrect seq_lenth and which in turn results in # position_ids being off by num_pad_tokens in past input __UpperCAmelCase : Dict = input_ids.clamp(self.pad_token_id + 1 ) __UpperCAmelCase : List[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 ) __UpperCAmelCase : List[Any] = self.get_config() __UpperCAmelCase : str = config.num_attention_heads __UpperCAmelCase : str = self.prepare_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, input_dict def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.prepare_config_and_inputs() return config, inputs_dict def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return TaConfig( vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return TaConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : str = UMTaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : List[str] = model( input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase , attention_mask=UpperCamelCase , decoder_attention_mask=UpperCamelCase , ) __UpperCAmelCase : List[str] = model(input_ids=UpperCamelCase , decoder_input_ids=UpperCamelCase ) __UpperCAmelCase : Optional[int] = result.last_hidden_state __UpperCAmelCase : Tuple = result.past_key_values __UpperCAmelCase : Optional[int] = result.encoder_last_hidden_state self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) ) 
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) ) # There should be `num_layers` key value embeddings stored in decoder_past self.parent.assertEqual(len(UpperCamelCase ) , config.num_layers ) # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple self.parent.assertEqual(len(decoder_past[0] ) , 4 ) def lowerCamelCase__ ( self : Any , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Dict = UMTaModel(config=UpperCamelCase ).get_decoder().to(UpperCamelCase ).eval() # first forward pass __UpperCAmelCase : Tuple = model(UpperCamelCase , use_cache=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = model(UpperCamelCase , use_cache=UpperCamelCase ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) ) self.parent.assertTrue(len(UpperCamelCase ) == len(UpperCamelCase ) + 1 ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids __UpperCAmelCase : Any = ids_tensor((self.batch_size, 1) , config.vocab_size ) # append to next input_ids and __UpperCAmelCase : str = torch.cat([input_ids, next_tokens] , dim=-1 ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase )["""last_hidden_state"""] __UpperCAmelCase : Dict = model(UpperCamelCase , past_key_values=UpperCamelCase )["""last_hidden_state"""] # select random slice __UpperCAmelCase : Dict = ids_tensor((1,) , output_from_past.shape[-1] ).item() __UpperCAmelCase : List[Any] = output_from_no_past[:, -1, random_slice_idx].detach() __UpperCAmelCase : Any = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase , atol=1e-3 ) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : List[str] = UMTaModel(config=UpperCamelCase ).to(UpperCamelCase ).half().eval() __UpperCAmelCase : Dict = model(**UpperCamelCase )["""last_hidden_state"""] self.parent.assertFalse(torch.isnan(UpperCamelCase ).any().item() ) @require_torch class lowerCamelCase__ ( A , A , A , unittest.TestCase ): """simple docstring""" __a = ( (UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else () ) __a = (UMTaForConditionalGeneration,) if is_torch_available() else () __a = ( { """conversational""": UMTaForConditionalGeneration, """feature-extraction""": UMTaModel, """summarization""": UMTaForConditionalGeneration, """text2text-generation""": UMTaForConditionalGeneration, """translation""": UMTaForConditionalGeneration, """question-answering""": UMTaForQuestionAnswering, } if is_torch_available() else {} ) __a = True __a = False __a = False __a = True __a = True # The small UMT5 model needs higher percentages for CPU/MP tests __a = [0.8, 0.9] def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = UMTaModelTester(self ) @unittest.skip("""Test has a segmentation fault on torch 1.8.0""" ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() __UpperCAmelCase : List[str] = UMTaModel(config_and_inputs[0] ).to(UpperCamelCase ) with 
tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , ) @unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_fpaa_forward(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : int = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""] __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() __UpperCAmelCase : Tuple = config_and_inputs[0] __UpperCAmelCase : str = UMTaForConditionalGeneration(UpperCamelCase ).eval() model.to(UpperCamelCase ) __UpperCAmelCase : List[str] = { """head_mask""": torch.zeros(config.num_layers , config.num_heads , device=UpperCamelCase ), """decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ), """cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=UpperCamelCase ), } for attn_name, (name, mask) in zip(UpperCamelCase , head_masking.items() ): __UpperCAmelCase : Any = {name: mask} # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified if name == "head_mask": __UpperCAmelCase : int = torch.ones( config.num_decoder_layers , config.num_heads , device=UpperCamelCase ) __UpperCAmelCase : Tuple = model.generate( config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=UpperCamelCase , return_dict_in_generate=UpperCamelCase , **UpperCamelCase , ) # We check the state of decoder_attentions and cross_attentions just from the last step __UpperCAmelCase : str = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 ) @unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' pass @require_torch @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow @unittest.skip( """Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. 
Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=UpperCamelCase ).to(UpperCamelCase ) __UpperCAmelCase : Any = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=UpperCamelCase , legacy=UpperCamelCase ) __UpperCAmelCase : Dict = [ """Bonjour monsieur <extra_id_0> bien <extra_id_1>.""", """No se como puedo <extra_id_0>.""", """This is the reason why we <extra_id_0> them.""", """The <extra_id_0> walks in <extra_id_1>, seats""", """A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""", ] __UpperCAmelCase : int = tokenizer(UpperCamelCase , return_tensors="""pt""" , padding=UpperCamelCase ).input_ids # fmt: off __UpperCAmelCase : List[Any] = torch.tensor( [ [ 38_530, 210_703, 256_299, 1_410, 256_298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 826, 321, 671, 25_922, 256_299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 1_460, 339, 312, 19_014, 10_620, 758, 256_299, 2_355,274, 1, 0, 0, 0, 0, 0, 0,0, 0], [ 517, 256_299, 14_869, 281, 301, 256_298, 275, 119_983,1, 0, 0, 0, 0, 0, 0, 0,0, 0], [ 320, 256_299, 14_869, 281, 2_234, 289, 2_275, 333,61_391, 289, 256_298, 543, 256_297, 168_714, 329, 256_296,274, 1], ] ) # fmt: on torch.testing.assert_allclose(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model.generate(input_ids.to(UpperCamelCase ) ) __UpperCAmelCase : Optional[Any] = [ """<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""", """<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", """<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""", ] __UpperCAmelCase : Tuple = tokenizer.batch_decode(UpperCamelCase ) self.assertEqual(UpperCamelCase , UpperCamelCase )
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCAmelCase : List[str] = 16 UpperCAmelCase : str = 32 def lowerCamelCase ( _UpperCamelCase : Accelerator , _UpperCamelCase : DatasetDict , _UpperCamelCase : List[int] , _UpperCamelCase : List[int] , _UpperCamelCase : int = 1_6 ) -> Tuple: '''simple docstring''' __UpperCAmelCase : List[str] = AutoTokenizer.from_pretrained("""bert-base-cased""" ) __UpperCAmelCase : int = DatasetDict( { """train""": dataset["""train"""].select(_UpperCamelCase ), """validation""": dataset["""train"""].select(_UpperCamelCase ), """test""": dataset["""validation"""], } ) def tokenize_function(_UpperCamelCase : List[str] ): # max_length=None => use the model max length (it's actually the default) __UpperCAmelCase : Optional[int] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_UpperCamelCase , max_length=_UpperCamelCase ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): __UpperCAmelCase : Dict = datasets.map( _UpperCamelCase , batched=_UpperCamelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library __UpperCAmelCase : str = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(_UpperCamelCase : Dict ): # On TPU it's best to pad everything to the same length or training will be very slow. __UpperCAmelCase : Optional[int] = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": __UpperCAmelCase : int = 1_6 elif accelerator.mixed_precision != "no": __UpperCAmelCase : Union[str, Any] = 8 else: __UpperCAmelCase : Any = None return tokenizer.pad( _UpperCamelCase , padding="""longest""" , max_length=_UpperCamelCase , pad_to_multiple_of=_UpperCamelCase , return_tensors="""pt""" , ) # Instantiate dataloaders. 
__UpperCAmelCase : int = DataLoader( tokenized_datasets["""train"""] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase ) __UpperCAmelCase : int = DataLoader( tokenized_datasets["""validation"""] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = DataLoader( tokenized_datasets["""test"""] , shuffle=_UpperCamelCase , collate_fn=_UpperCamelCase , batch_size=_UpperCamelCase ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Dict = [] # Download the dataset __UpperCAmelCase : Tuple = load_dataset("""glue""" , """mrpc""" ) # Create our splits __UpperCAmelCase : Optional[int] = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator __UpperCAmelCase : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __UpperCAmelCase : int = config["""lr"""] __UpperCAmelCase : Optional[int] = int(config["""num_epochs"""] ) __UpperCAmelCase : List[str] = int(config["""seed"""] ) __UpperCAmelCase : Dict = int(config["""batch_size"""] ) __UpperCAmelCase : Tuple = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation __UpperCAmelCase : List[str] = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: __UpperCAmelCase : Tuple = batch_size // MAX_GPU_BATCH_SIZE __UpperCAmelCase : Tuple = MAX_GPU_BATCH_SIZE set_seed(_UpperCamelCase ) # New Code # # Create our folds: __UpperCAmelCase : Any = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) __UpperCAmelCase : str = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(_UpperCamelCase ): __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = get_fold_dataloaders( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __UpperCAmelCase : Optional[Any] = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=_UpperCamelCase ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). __UpperCAmelCase : str = model.to(accelerator.device ) # Instantiate optimizer __UpperCAmelCase : Tuple = AdamW(params=model.parameters() , lr=_UpperCamelCase ) # Instantiate scheduler __UpperCAmelCase : str = get_linear_schedule_with_warmup( optimizer=_UpperCamelCase , num_warmup_steps=1_0_0 , num_training_steps=(len(_UpperCamelCase ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. 
__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Tuple = accelerator.prepare( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Now we train the model for epoch in range(_UpperCamelCase ): model.train() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) __UpperCAmelCase : Any = model(**_UpperCamelCase ) __UpperCAmelCase : List[str] = outputs.loss __UpperCAmelCase : Any = loss / gradient_accumulation_steps accelerator.backward(_UpperCamelCase ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCAmelCase : Tuple = model(**_UpperCamelCase ) __UpperCAmelCase : Optional[int] = outputs.logits.argmax(dim=-1 ) __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=_UpperCamelCase , references=_UpperCamelCase , ) __UpperCAmelCase : List[Any] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _UpperCamelCase ) # New Code # # We also run predictions on the test set at the very end __UpperCAmelCase : Tuple = [] for step, batch in enumerate(_UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): __UpperCAmelCase : Tuple = model(**_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = outputs.logits __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. test_predictions.append(torch.cat(_UpperCamelCase , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: __UpperCAmelCase : List[Any] = torch.cat(_UpperCamelCase , dim=0 ) __UpperCAmelCase : Tuple = torch.stack(_UpperCamelCase , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) __UpperCAmelCase : List[str] = metric.compute(predictions=_UpperCamelCase , references=_UpperCamelCase ) accelerator.print("""Average test metrics from all folds:""" , _UpperCamelCase ) def lowerCamelCase ( ) -> int: '''simple docstring''' __UpperCAmelCase : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=_UpperCamelCase , default=_UpperCamelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). 
Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=_UpperCamelCase , default=3 , help="""The number of splits to perform across the dataset""" ) __UpperCAmelCase : Any = parser.parse_args() __UpperCAmelCase : List[str] = {"""lr""": 2E-5, """num_epochs""": 3, """seed""": 4_2, """batch_size""": 1_6} training_function(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": main()
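# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original script): how StratifiedKFold
# produces the (train_idxs, valid_idxs) pairs consumed by the training loop
# above.  Each fold preserves the class balance of the labels.  Assumes only
# that numpy and scikit-learn are installed.
# ---------------------------------------------------------------------------
import numpy as np
from sklearn.model_selection import StratifiedKFold

labels = np.array([0, 0, 0, 1, 1, 1, 0, 1, 0, 1])
kfold = StratifiedKFold(n_splits=2)

# The script above passes np.zeros(num_rows) as X because only the labels
# matter for stratification; the features are never inspected.
for fold, (train_idxs, valid_idxs) in enumerate(kfold.split(np.zeros(len(labels)), labels)):
    print(f"fold {fold}: train={train_idxs.tolist()} valid={valid_idxs.tolist()}")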
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
"""simple docstring""" import os import tempfile import unittest from transformers import NezhaConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, NezhaModel, ) from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Any=13 , UpperCamelCase : int=7 , UpperCamelCase : str=True , UpperCamelCase : Any=True , UpperCamelCase : Tuple=True , UpperCamelCase : int=True , UpperCamelCase : Any=99 , UpperCamelCase : List[str]=32 , UpperCamelCase : List[Any]=5 , UpperCamelCase : Tuple=4 , UpperCamelCase : Dict=37 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : List[str]=0.1 , UpperCamelCase : Dict=128 , UpperCamelCase : List[str]=32 , UpperCamelCase : Any=16 , UpperCamelCase : Any=2 , UpperCamelCase : str=0.02 , UpperCamelCase : Any=3 , UpperCamelCase : str=4 , UpperCamelCase : List[Any]=None , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : Any = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : str = use_input_mask __UpperCAmelCase : Optional[int] = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Any = vocab_size __UpperCAmelCase : int = hidden_size __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : List[str] = num_attention_heads __UpperCAmelCase : Union[str, Any] = intermediate_size __UpperCAmelCase : str = hidden_act __UpperCAmelCase : List[Any] = hidden_dropout_prob __UpperCAmelCase : Optional[Any] = attention_probs_dropout_prob __UpperCAmelCase : Tuple = max_position_embeddings __UpperCAmelCase : Optional[Any] = type_vocab_size __UpperCAmelCase : List[str] = type_sequence_label_size __UpperCAmelCase : str = initializer_range __UpperCAmelCase : Optional[int] = num_labels __UpperCAmelCase : Tuple = num_choices __UpperCAmelCase : Tuple = scope def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Optional[Any] = None if self.use_input_mask: __UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Any = None if self.use_token_type_ids: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : Any = None __UpperCAmelCase : Tuple = None __UpperCAmelCase : List[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : 
Union[str, Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return NezhaConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase , initializer_range=self.initializer_range , ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Tuple = self.prepare_config_and_inputs() __UpperCAmelCase : Any = True __UpperCAmelCase : int = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) __UpperCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : int , UpperCamelCase : Tuple , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = NezhaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) __UpperCAmelCase : Any = model(UpperCamelCase , token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Any , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : List[str] , ): '''simple docstring''' __UpperCAmelCase : int = True __UpperCAmelCase : Union[str, Any] = NezhaModel(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Optional[int] = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , encoder_hidden_states=UpperCamelCase , encoder_attention_mask=UpperCamelCase , ) __UpperCAmelCase : Any = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , encoder_hidden_states=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : Dict , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] ): 
'''simple docstring''' __UpperCAmelCase : Optional[Any] = NezhaForMaskedLM(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : Any , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Any = NezhaForNextSentencePrediction(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : int = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = NezhaForPreTraining(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : int = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , next_sentence_label=UpperCamelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Tuple = NezhaForQuestionAnswering(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : List[str] = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , start_positions=UpperCamelCase , end_positions=UpperCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : Optional[int] , UpperCamelCase : int , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : Any = self.num_labels __UpperCAmelCase : int = NezhaForSequenceClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Any = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = self.num_labels __UpperCAmelCase : Optional[Any] = NezhaForTokenClassification(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : 
Optional[Any] = model(UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Dict , UpperCamelCase : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Tuple = self.num_choices __UpperCAmelCase : Any = NezhaForMultipleChoice(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() __UpperCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : int = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Dict = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __UpperCAmelCase : Any = model( UpperCamelCase , attention_mask=UpperCamelCase , token_type_ids=UpperCamelCase , labels=UpperCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Union[str, Any] = config_and_inputs __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class lowerCamelCase__ ( A , A , A , unittest.TestCase ): """simple docstring""" __a = ( ( NezhaModel, NezhaForMaskedLM, NezhaForMultipleChoice, NezhaForNextSentencePrediction, NezhaForPreTraining, NezhaForQuestionAnswering, NezhaForSequenceClassification, NezhaForTokenClassification, ) if is_torch_available() else () ) __a = ( { """feature-extraction""": NezhaModel, """fill-mask""": NezhaForMaskedLM, """question-answering""": NezhaForQuestionAnswering, """text-classification""": NezhaForSequenceClassification, """token-classification""": NezhaForTokenClassification, """zero-shot""": NezhaForSequenceClassification, } if is_torch_available() else {} ) __a = True def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[int]=False ): '''simple docstring''' __UpperCAmelCase : str = super()._prepare_for_class(UpperCamelCase , UpperCamelCase , return_labels=UpperCamelCase ) if return_labels: if model_class in get_values(UpperCamelCase ): __UpperCAmelCase : str = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=UpperCamelCase ) __UpperCAmelCase : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=UpperCamelCase ) return inputs_dict def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Dict = NezhaModelTester(self ) __UpperCAmelCase : Union[str, Any] = ConfigTester(self , config_class=UpperCamelCase , hidden_size=37 ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : 
Any = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*UpperCamelCase ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() __UpperCAmelCase : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_next_sequence_prediction(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __UpperCAmelCase : List[Any] = NezhaModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow @require_torch_gpu def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # NezhaForMultipleChoice behaves incorrectly in JIT environments. 
if model_class == NezhaForMultipleChoice: return __UpperCAmelCase : int = True __UpperCAmelCase : int = model_class(config=UpperCamelCase ) __UpperCAmelCase : Any = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[str] = torch.jit.trace( UpperCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(UpperCamelCase , os.path.join(UpperCamelCase , """bert.pt""" ) ) __UpperCAmelCase : int = torch.jit.load(os.path.join(UpperCamelCase , """bert.pt""" ) , map_location=UpperCamelCase ) loaded(inputs_dict["""input_ids"""].to(UpperCamelCase ) , inputs_dict["""attention_mask"""].to(UpperCamelCase ) ) @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" ) __UpperCAmelCase : Optional[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __UpperCAmelCase : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCAmelCase : Optional[int] = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] __UpperCAmelCase : Any = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , UpperCamelCase ) __UpperCAmelCase : Any = torch.tensor([[[0.0685, 0.2441, 0.1102], [0.0600, 0.1906, 0.1349], [0.0221, 0.0819, 0.0586]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : int = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" ) __UpperCAmelCase : List[str] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) __UpperCAmelCase : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __UpperCAmelCase : List[str] = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] __UpperCAmelCase : str = torch.Size((1, 6, 21_128) ) self.assertEqual(output.shape , UpperCamelCase ) __UpperCAmelCase : Optional[int] = torch.tensor( [[-2.7939, -1.7902, -2.2189], [-2.8585, -1.8908, -2.3723], [-2.6499, -1.7750, -2.2558]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) )
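# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original tests): the trace/save/load
# round trip exercised by the torchscript test above, reduced to a tiny
# standalone module (`TinyModel` is a hypothetical stand-in).  Assumes only
# that PyTorch is installed.
# ---------------------------------------------------------------------------
import os
import tempfile

import torch
from torch import nn


class TinyModel(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 2)

    def forward(self, x):
        return self.linear(x)


model = TinyModel().eval()
example = torch.randn(1, 4)
traced = torch.jit.trace(model, example)  # record the ops for this input shape

with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "tiny.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path, map_location="cpu")

# The reloaded TorchScript module reproduces the eager module's outputs.
assert torch.allclose(model(example), loaded(example))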
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
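# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the numerical core of
# the pipeline above -- rescale by 1/255, then per-channel normalization --
# in plain numpy so it runs without the library.  The mean/std values below
# are the commonly published CLIP normalization constants and are assumed to
# match OPENAI_CLIP_MEAN / OPENAI_CLIP_STD.
# ---------------------------------------------------------------------------
import numpy as np

CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, size=(224, 224, 3)).astype(np.float32)

rescaled = image * (1 / 255)                    # do_rescale
normalized = (rescaled - CLIP_MEAN) / CLIP_STD  # do_normalize, per channel
pixel_values = normalized.transpose(2, 0, 1)    # HWC -> CHW (ChannelDimension.FIRST)

print(pixel_values.shape)  # (3, 224, 224)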
"""simple docstring""" import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features UpperCAmelCase : Optional[int] = logging.get_logger(__name__) UpperCAmelCase : int = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) UpperCAmelCase : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class lowerCamelCase__ : """simple docstring""" __a = field( default=A , metadata={"""help""": """Model type selected in the list: """ + """, """.join(A )} ) __a = field( default=A , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} ) __a = field( default=128 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __a = field( default=128 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , ) __a = field( default=64 , metadata={ """help""": ( """The maximum number of tokens for the question. Questions longer than this will """ """be truncated to this length.""" ) } , ) __a = field( default=30 , metadata={ """help""": ( """The maximum length of an answer that can be generated. This is needed because the start """ """and end predictions are not conditioned on one another.""" ) } , ) __a = field( default=A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} ) __a = field( default=A , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} ) __a = field( default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) __a = field( default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} ) __a = field( default=0 , metadata={ """help""": ( """language id of input for language-specific xlm models (see""" """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)""" ) } , ) __a = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} ) class lowerCamelCase__ ( A ): """simple docstring""" __a = """train""" __a = """dev""" class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 __a = 42 __a = 42 __a = 42 def __init__( self : int , UpperCamelCase : SquadDataTrainingArguments , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : Optional[int] = None , UpperCamelCase : Union[str, Split] = Split.train , UpperCamelCase : Optional[bool] = False , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[str] = "pt" , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = args __UpperCAmelCase : Optional[Any] = is_language_sensitive __UpperCAmelCase : str = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(UpperCamelCase , UpperCamelCase ): try: __UpperCAmelCase : List[str] = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""" ) __UpperCAmelCase : int = mode # Load data features from cache or dataset file __UpperCAmelCase : Tuple 
= """v2""" if args.version_2_with_negative else """v1""" __UpperCAmelCase : Tuple = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , ) # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. __UpperCAmelCase : int = cached_features_file + """.lock""" with FileLock(UpperCamelCase ): if os.path.exists(UpperCamelCase ) and not args.overwrite_cache: __UpperCAmelCase : List[str] = time.time() __UpperCAmelCase : str = torch.load(UpperCamelCase ) # Legacy cache files have only features, while new cache files # will have dataset and examples also. __UpperCAmelCase : int = self.old_features["""features"""] __UpperCAmelCase : int = self.old_features.get("""dataset""" , UpperCamelCase ) __UpperCAmelCase : Optional[int] = self.old_features.get("""examples""" , UpperCamelCase ) logger.info( f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start ) if self.dataset is None or self.examples is None: logger.warning( f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in''' """ future run""" ) else: if mode == Split.dev: __UpperCAmelCase : List[Any] = self.processor.get_dev_examples(args.data_dir ) else: __UpperCAmelCase : Any = self.processor.get_train_examples(args.data_dir ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = squad_convert_examples_to_features( examples=self.examples , tokenizer=UpperCamelCase , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=UpperCamelCase , ) __UpperCAmelCase : str = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , UpperCamelCase , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. 
logger.info( f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' ) def __len__( self : Optional[Any] ): '''simple docstring''' return len(self.features ) def __getitem__( self : List[Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.features[i] __UpperCAmelCase : List[Any] = torch.tensor(feature.input_ids , dtype=torch.long ) __UpperCAmelCase : List[str] = torch.tensor(feature.attention_mask , dtype=torch.long ) __UpperCAmelCase : str = torch.tensor(feature.token_type_ids , dtype=torch.long ) __UpperCAmelCase : str = torch.tensor(feature.cls_index , dtype=torch.long ) __UpperCAmelCase : Tuple = torch.tensor(feature.p_mask , dtype=torch.float ) __UpperCAmelCase : str = torch.tensor(feature.is_impossible , dtype=torch.float ) __UpperCAmelCase : Optional[int] = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} ) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible} ) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} ) if self.mode == Split.train: __UpperCAmelCase : str = torch.tensor(feature.start_position , dtype=torch.long ) __UpperCAmelCase : Union[str, Any] = torch.tensor(feature.end_position , dtype=torch.long ) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} ) return inputs
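# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the lock-guarded cache
# pattern used above, reduced to its essentials.  Only the first process to
# acquire the lock pays the cost of building the data; later processes load
# the cached file instead.  `expensive_build` is a hypothetical stand-in for
# feature conversion; assumes the `filelock` package and PyTorch.
# ---------------------------------------------------------------------------
import os
import tempfile

import torch
from filelock import FileLock


def expensive_build():
    return {"features": list(range(10))}


cache_dir = tempfile.mkdtemp()
cache_file = os.path.join(cache_dir, "cached_features")

with FileLock(cache_file + ".lock"):
    if os.path.exists(cache_file):
        data = torch.load(cache_file)
    else:
        data = expensive_build()
        torch.save(data, cache_file)

print(data["features"][:3])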
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
"""simple docstring""" import copy from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto.configuration_auto import AutoConfig if TYPE_CHECKING: from ... import PreTrainedTokenizerBase, TensorType UpperCAmelCase : Optional[Any] = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" __a = """vision-encoder-decoder""" __a = True def __init__( self : Union[str, Any] , **UpperCamelCase : Optional[int] ): '''simple docstring''' super().__init__(**UpperCamelCase ) if "encoder" not in kwargs or "decoder" not in kwargs: raise ValueError( f'''A configuraton of type {self.model_type} cannot be instantiated because ''' f'''not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}''' ) __UpperCAmelCase : List[str] = kwargs.pop("""encoder""" ) __UpperCAmelCase : Union[str, Any] = encoder_config.pop("""model_type""" ) __UpperCAmelCase : List[Any] = kwargs.pop("""decoder""" ) __UpperCAmelCase : Any = decoder_config.pop("""model_type""" ) __UpperCAmelCase : int = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : List[str] = AutoConfig.for_model(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Optional[int] = True @classmethod def lowerCamelCase__ ( cls : Optional[Any] , UpperCamelCase : PretrainedConfig , UpperCamelCase : PretrainedConfig , **UpperCamelCase : List[Any] ): '''simple docstring''' logger.info("""Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config""" ) __UpperCAmelCase : List[str] = True __UpperCAmelCase : Dict = True return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Dict = copy.deepcopy(self.__dict__ ) __UpperCAmelCase : Union[str, Any] = self.encoder.to_dict() __UpperCAmelCase : Optional[int] = self.decoder.to_dict() __UpperCAmelCase : List[Any] = self.__class__.model_type return output class lowerCamelCase__ ( A ): """simple docstring""" __a = version.parse("""1.11""" ) @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def lowerCamelCase__ ( self : int ): '''simple docstring''' return 1e-4 @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return OrderedDict({"""last_hidden_state""": {0: """batch""", 1: """encoder_sequence"""}} ) class lowerCamelCase__ ( A ): """simple docstring""" @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = OrderedDict() __UpperCAmelCase : List[Any] = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __UpperCAmelCase : int = {0: """batch""", 1: """past_decoder_sequence + sequence"""} __UpperCAmelCase : List[str] = {0: """batch""", 1: """encoder_sequence"""} return common_inputs def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : "PreTrainedTokenizerBase" , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional["TensorType"] = None , ): '''simple docstring''' import torch __UpperCAmelCase : Union[str, Any] = OrderedDict() __UpperCAmelCase : List[str] = super().generate_dummy_inputs( UpperCamelCase , batch_size=UpperCamelCase , 
seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : str = dummy_input["""input_ids"""].shape __UpperCAmelCase : Union[str, Any] = (batch, encoder_sequence, self._config.encoder_hidden_size) __UpperCAmelCase : Any = dummy_input.pop("""input_ids""" ) __UpperCAmelCase : Dict = dummy_input.pop("""attention_mask""" ) __UpperCAmelCase : Optional[Any] = torch.zeros(UpperCamelCase ) return common_inputs class lowerCamelCase__ ( A ): """simple docstring""" @property def lowerCamelCase__ ( self : str ): '''simple docstring''' pass def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : PretrainedConfig ): '''simple docstring''' return VisionEncoderDecoderEncoderOnnxConfig(UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : PretrainedConfig , UpperCamelCase : PretrainedConfig , UpperCamelCase : str = "default" ): '''simple docstring''' __UpperCAmelCase : Dict = encoder_config.hidden_size return VisionEncoderDecoderDecoderOnnxConfig(UpperCamelCase , UpperCamelCase )
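# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original file): the composite-config
# round trip the class above implements -- nest two sub-config dicts under
# `encoder`/`decoder`, tag each with its `model_type`, and dispatch on that
# tag when rebuilding.  Plain dicts and the hypothetical helpers `compose` /
# `rebuild` stand in for PretrainedConfig and AutoConfig.for_model.
# ---------------------------------------------------------------------------
def compose(encoder: dict, decoder: dict) -> dict:
    return {
        "model_type": "vision-encoder-decoder",
        "encoder": dict(encoder),
        "decoder": dict(decoder),
    }


def rebuild(serialized: dict) -> tuple:
    encoder = dict(serialized["encoder"])
    decoder = dict(serialized["decoder"])
    # Mirrors `AutoConfig.for_model(...)`: pop the stored model_type and use
    # it to choose which concrete config class to instantiate.
    return encoder.pop("model_type"), decoder.pop("model_type")


config = compose({"model_type": "vit", "hidden_size": 768},
                 {"model_type": "gpt2", "n_embd": 768})
assert rebuild(config) == ("vit", "gpt2")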
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
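# The encode/decode tests above exercise a standard Flax testing pattern: run the
# same function once compiled under jax.jit and once eagerly, then compare the
# outputs. A minimal, self-contained sketch of that pattern (toy function, not
# Pegasus; shapes are compared just as the tests above do):
import jax
import jax.numpy as jnp


@jax.jit
def scale(x):
    return x * 2.0


x = jnp.ones((2, 3))
with jax.disable_jit():
    eager_out = scale(x)   # runs op-by-op, no XLA compilation
jitted_out = scale(x)      # traced and compiled on first call

assert jitted_out.shape == eager_out.shape
assert jnp.allclose(jitted_out, eager_out)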
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> List[str]: '''simple docstring''' if b == 0: return 1 if (b % 2) == 0: return actual_power(_UpperCamelCase , int(b / 2 ) ) * actual_power(_UpperCamelCase , int(b / 2 ) ) else: return a * actual_power(_UpperCamelCase , int(b / 2 ) ) * actual_power(_UpperCamelCase , int(b / 2 ) ) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> float: '''simple docstring''' if b < 0: return 1 / actual_power(_UpperCamelCase , _UpperCamelCase ) return actual_power(_UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": print(power(-2, -3))
320
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
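# Layout sketch for the kernel conversion above: TensorFlow stores conv kernels
# as (H, W, C_in, C_out) while PyTorch expects (C_out, C_in, H, W), which is why
# the script permutes with (3, 2, 0, 1). Depthwise kernels are
# (H, W, channels, multiplier) in TF and (channels, 1, H, W) in PyTorch for a
# multiplier of 1, hence permute(2, 3, 0, 1). Shapes here are illustrative.
import numpy as np
import torch

tf_kernel = np.zeros((3, 3, 16, 32))                        # H, W, C_in, C_out
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_kernel.shape == (32, 16, 3, 3)                    # C_out, C_in, H, W

tf_dw = np.zeros((3, 3, 16, 1))                             # H, W, C, multiplier
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
assert pt_dw.shape == (16, 1, 3, 3)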
320
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = KandinskyVaaControlnetPipeline __a = ["""image_embeds""", """negative_image_embeds""", """hint"""] __a = ["""image_embeds""", """negative_image_embeds""", """hint"""] __a = [ """generator""", """height""", """width""", """latents""", """guidance_scale""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] __a = False @property def lowerCamelCase__ ( self : int ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : int ): '''simple docstring''' return self.time_input_dim @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return 100 @property def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Optional[Any] = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __UpperCAmelCase : Dict = UNetaDConditionModel(**UpperCamelCase ) return model @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def lowerCamelCase__ ( self : int ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Any = self.dummy_unet __UpperCAmelCase : str = self.dummy_movq __UpperCAmelCase : Optional[int] = DDIMScheduler( num_train_timesteps=1_000 , beta_schedule="""linear""" , beta_start=0.00085 , beta_end=0.012 , clip_sample=UpperCamelCase , set_alpha_to_one=UpperCamelCase , steps_offset=1 , prediction_type="""epsilon""" , 
thresholding=UpperCamelCase , ) __UpperCAmelCase : List[str] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCamelCase__ ( self : int , UpperCamelCase : Optional[int] , UpperCamelCase : Dict=0 ): '''simple docstring''' __UpperCAmelCase : List[str] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __UpperCAmelCase : int = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( UpperCamelCase ) # create hint __UpperCAmelCase : List[str] = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) if str(UpperCamelCase ).startswith("""mps""" ): __UpperCAmelCase : List[str] = torch.manual_seed(UpperCamelCase ) else: __UpperCAmelCase : Optional[Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __UpperCAmelCase : List[Any] = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 64, """width""": 64, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = """cpu""" __UpperCAmelCase : Optional[int] = self.get_dummy_components() __UpperCAmelCase : Optional[Any] = self.pipeline_class(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : Optional[int] = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) __UpperCAmelCase : Tuple = output.images __UpperCAmelCase : str = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] __UpperCAmelCase : List[str] = image[0, -3:, -3:, -1] __UpperCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : List[Any] = np.array( [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" ) __UpperCAmelCase : Optional[Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinskyv22/hint_image_cat.png""" ) __UpperCAmelCase : Optional[int] = torch.from_numpy(np.array(UpperCamelCase ) ).float() / 255.0 __UpperCAmelCase : Optional[Any] = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) __UpperCAmelCase : Optional[int] = KandinskyVaaPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase ) __UpperCAmelCase : Dict = KandinskyVaaControlnetPipeline.from_pretrained( """kandinsky-community/kandinsky-2-2-controlnet-depth""" , 
torch_dtype=torch.floataa ) __UpperCAmelCase : Optional[Any] = pipeline.to(UpperCamelCase ) pipeline.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : str = """A robot, 4k photo""" __UpperCAmelCase : Tuple = torch.Generator(device="""cuda""" ).manual_seed(0 ) __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = pipe_prior( UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __UpperCAmelCase : int = torch.Generator(device="""cuda""" ).manual_seed(0 ) __UpperCAmelCase : Optional[Any] = pipeline( image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , hint=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , output_type="""np""" , ) __UpperCAmelCase : List[Any] = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
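# Sketch of the seeded-generator pattern in the test above: diffusers pipelines
# accept a torch.Generator, so two runs seeded identically draw identical
# latents (assuming the same device, dtype, and library versions).
import torch

gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
assert torch.equal(torch.randn(4, generator=gen_a), torch.randn(4, generator=gen_b))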
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
"""simple docstring""" import unittest import numpy as np from transformers import DistilBertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.distilbert.modeling_flax_distilbert import ( FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertModel, ) class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple=13 , UpperCamelCase : str=7 , UpperCamelCase : Tuple=True , UpperCamelCase : Tuple=True , UpperCamelCase : Tuple=True , UpperCamelCase : int=True , UpperCamelCase : Dict=99 , UpperCamelCase : Tuple=32 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : Tuple=4 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu" , UpperCamelCase : List[Any]=0.1 , UpperCamelCase : Tuple=0.1 , UpperCamelCase : Optional[Any]=512 , UpperCamelCase : int=16 , UpperCamelCase : Optional[Any]=2 , UpperCamelCase : Optional[Any]=0.02 , UpperCamelCase : Tuple=4 , ): '''simple docstring''' __UpperCAmelCase : Dict = parent __UpperCAmelCase : Any = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : int = use_attention_mask __UpperCAmelCase : List[str] = use_token_type_ids __UpperCAmelCase : Union[str, Any] = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Union[str, Any] = hidden_size __UpperCAmelCase : str = num_hidden_layers __UpperCAmelCase : Optional[int] = num_attention_heads __UpperCAmelCase : Dict = intermediate_size __UpperCAmelCase : Dict = hidden_act __UpperCAmelCase : Optional[int] = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Any = max_position_embeddings __UpperCAmelCase : int = type_vocab_size __UpperCAmelCase : int = type_sequence_label_size __UpperCAmelCase : Union[str, Any] = initializer_range __UpperCAmelCase : List[str] = num_choices def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : str = None if self.use_attention_mask: __UpperCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : Optional[int] = DistilBertConfig( vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=UpperCamelCase , ) return config, input_ids, attention_mask def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = self.prepare_config_and_inputs() __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Any = config_and_inputs __UpperCAmelCase : Any = {"""input_ids""": input_ids, """attention_mask""": attention_mask} return config, inputs_dict @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxDistilBertModel, FlaxDistilBertForMaskedLM, FlaxDistilBertForMultipleChoice, 
FlaxDistilBertForQuestionAnswering, FlaxDistilBertForSequenceClassification, FlaxDistilBertForTokenClassification, FlaxDistilBertForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Tuple = FlaxDistilBertModelTester(self ) @slow def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : List[str] = model_class_name.from_pretrained("""distilbert-base-uncased""" ) __UpperCAmelCase : Optional[int] = model(np.ones((1, 1) ) ) self.assertIsNotNone(UpperCamelCase ) @require_flax class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" @slow def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxDistilBertModel.from_pretrained("""distilbert-base-uncased""" ) __UpperCAmelCase : Any = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] ) __UpperCAmelCase : List[str] = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) __UpperCAmelCase : Tuple = model(UpperCamelCase , attention_mask=UpperCamelCase )[0] __UpperCAmelCase : int = (1, 11, 768) self.assertEqual(output.shape , UpperCamelCase ) __UpperCAmelCase : Tuple = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] ) self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , UpperCamelCase , atol=1e-4 ) )
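# Sketch of the expected-slice pattern used in the integration test above:
# rather than pinning every element of a large output tensor, the test pins a
# small slice with a loose tolerance, which stays robust to minor numerical
# drift elsewhere in the tensor. Values below are stand-ins, not model outputs.
import numpy as np

output = np.arange(24, dtype=np.float32).reshape(1, 6, 4)   # stand-in model output
expected_slice = output[:, 1:4, 1:4].copy() + 1e-5          # tiny numerical drift
assert np.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)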
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : str=13 , UpperCamelCase : Tuple=3 , UpperCamelCase : Union[str, Any]=224 , UpperCamelCase : int=30 , UpperCamelCase : List[str]=400 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=None , UpperCamelCase : str=True , UpperCamelCase : List[str]=[0.5, 0.5, 0.5] , UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , ): '''simple docstring''' __UpperCAmelCase : Dict = size if size is not None else {"""height""": 18, """width""": 18} __UpperCAmelCase : int = parent __UpperCAmelCase : List[Any] = batch_size __UpperCAmelCase : int = num_channels __UpperCAmelCase : Optional[int] = image_size __UpperCAmelCase : int = min_resolution __UpperCAmelCase : Optional[Any] = max_resolution __UpperCAmelCase : List[Any] = do_resize __UpperCAmelCase : List[Any] = size __UpperCAmelCase : Optional[int] = do_normalize __UpperCAmelCase : List[str] = image_mean __UpperCAmelCase : Dict = image_std def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ViTImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientFormerImageProcessorTester(self ) @property def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return self.image_proc_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """size""" ) ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' pass def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase : Dict = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input __UpperCAmelCase : List[str] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __UpperCAmelCase : List[str] = image_processor(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, 
self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase : str = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input __UpperCAmelCase : Optional[int] = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __UpperCAmelCase : Any = image_processor(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase : Tuple = prepare_image_inputs(self.image_proc_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input __UpperCAmelCase : Dict = image_processor(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , ) # Test batched __UpperCAmelCase : Tuple = image_processor(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_proc_tester.batch_size, self.image_proc_tester.num_channels, self.image_proc_tester.size["""height"""], self.image_proc_tester.size["""width"""], ) , )
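# Usage sketch of the behaviour the three tests above exercise: the image
# processor accepts PIL images, NumPy arrays, or torch tensors and returns a
# batched pixel_values tensor at the configured size. Assumes a recent
# transformers version where `size` is a {"height", "width"} dict.
import numpy as np
from PIL import Image
from transformers import ViTImageProcessor

processor = ViTImageProcessor(size={"height": 18, "width": 18})
image = Image.fromarray(np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)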
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
"""simple docstring""" from sklearn.metrics import mean_squared_error import datasets UpperCAmelCase : Optional[Any] = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' UpperCAmelCase : Dict = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n' UpperCAmelCase : Union[str, Any] = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. 
])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[ """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html""" ] , ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' if self.config_name == "multilist": return { "predictions": datasets.Sequence(datasets.Value("""float""" ) ), "references": datasets.Sequence(datasets.Value("""float""" ) ), } else: return { "predictions": datasets.Value("""float""" ), "references": datasets.Value("""float""" ), } def lowerCamelCase__ ( self : Any , UpperCamelCase : Dict , UpperCamelCase : Union[str, Any] , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[Any]="uniform_average" , UpperCamelCase : int=True ): '''simple docstring''' __UpperCAmelCase : Dict = mean_squared_error( UpperCamelCase , UpperCamelCase , sample_weight=UpperCamelCase , multioutput=UpperCamelCase , squared=UpperCamelCase ) return {"mse": mse}
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
"""simple docstring""" import argparse import json from typing import List from ltp import LTP from transformers.models.bert.tokenization_bert import BertTokenizer def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> int: '''simple docstring''' if ( (cp >= 0x4e_00 and cp <= 0x9f_ff) or (cp >= 0x34_00 and cp <= 0x4d_bf) # or (cp >= 0x2_00_00 and cp <= 0x2_a6_df) # or (cp >= 0x2_a7_00 and cp <= 0x2_b7_3f) # or (cp >= 0x2_b7_40 and cp <= 0x2_b8_1f) # or (cp >= 0x2_b8_20 and cp <= 0x2_ce_af) # or (cp >= 0xf9_00 and cp <= 0xfa_ff) or (cp >= 0x2_f8_00 and cp <= 0x2_fa_1f) # ): # return True return False def lowerCamelCase ( _UpperCamelCase : str ) -> Any: '''simple docstring''' for char in word: __UpperCAmelCase : Dict = ord(_UpperCamelCase ) if not _is_chinese_char(_UpperCamelCase ): return 0 return 1 def lowerCamelCase ( _UpperCamelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : str = set() for token in tokens: __UpperCAmelCase : List[Any] = len(_UpperCamelCase ) > 1 and is_chinese(_UpperCamelCase ) if chinese_word: word_set.add(_UpperCamelCase ) __UpperCAmelCase : List[str] = list(_UpperCamelCase ) return word_list def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : set() ) -> Optional[int]: '''simple docstring''' if not chinese_word_set: return bert_tokens __UpperCAmelCase : Optional[Any] = max([len(_UpperCamelCase ) for w in chinese_word_set] ) __UpperCAmelCase : Optional[Any] = bert_tokens __UpperCAmelCase ,__UpperCAmelCase : Tuple = 0, len(_UpperCamelCase ) while start < end: __UpperCAmelCase : List[Any] = True if is_chinese(bert_word[start] ): __UpperCAmelCase : Optional[Any] = min(end - start , _UpperCamelCase ) for i in range(_UpperCamelCase , 1 , -1 ): __UpperCAmelCase : Tuple = """""".join(bert_word[start : start + i] ) if whole_word in chinese_word_set: for j in range(start + 1 , start + i ): __UpperCAmelCase : List[str] = """##""" + bert_word[j] __UpperCAmelCase : int = start + i __UpperCAmelCase : Optional[int] = False break if single_word: start += 1 return bert_word def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : LTP , _UpperCamelCase : BertTokenizer ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : int = [] for i in range(0 , len(_UpperCamelCase ) , 1_0_0 ): __UpperCAmelCase : int = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["""cws"""] ).cws __UpperCAmelCase : Union[str, Any] = [get_chinese_word(_UpperCamelCase ) for r in res] ltp_res.extend(_UpperCamelCase ) assert len(_UpperCamelCase ) == len(_UpperCamelCase ) __UpperCAmelCase : Any = [] for i in range(0 , len(_UpperCamelCase ) , 1_0_0 ): __UpperCAmelCase : Tuple = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_UpperCamelCase , truncation=_UpperCamelCase , max_length=5_1_2 ) bert_res.extend(res["""input_ids"""] ) assert len(_UpperCamelCase ) == len(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = [] for input_ids, chinese_word in zip(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = [] for id in input_ids: __UpperCAmelCase : str = bert_tokenizer._convert_id_to_token(_UpperCamelCase ) input_tokens.append(_UpperCamelCase ) __UpperCAmelCase : Tuple = add_sub_symbol(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Optional[Any] = [] # We only save pos of chinese subwords start with ##, which mean is part of a whole word. 
for i, token in enumerate(_UpperCamelCase ): if token[:2] == "##": __UpperCAmelCase : List[str] = token[2:] # save chinese tokens' pos if len(_UpperCamelCase ) == 1 and _is_chinese_char(ord(_UpperCamelCase ) ): ref_id.append(_UpperCamelCase ) ref_ids.append(_UpperCamelCase ) assert len(_UpperCamelCase ) == len(_UpperCamelCase ) return ref_ids def lowerCamelCase ( _UpperCamelCase : Any ) -> Union[str, Any]: '''simple docstring''' with open(args.file_name , """r""" , encoding="""utf-8""" ) as f: __UpperCAmelCase : List[Any] = f.readlines() __UpperCAmelCase : Tuple = [line.strip() for line in data if len(_UpperCamelCase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029' __UpperCAmelCase : Union[str, Any] = LTP(args.ltp ) # faster in GPU device __UpperCAmelCase : Optional[int] = BertTokenizer.from_pretrained(args.bert ) __UpperCAmelCase : List[Any] = prepare_ref(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) with open(args.save_path , """w""" , encoding="""utf-8""" ) as f: __UpperCAmelCase : Any = [json.dumps(_UpperCamelCase ) + """\n""" for ref in ref_ids] f.writelines(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : int = argparse.ArgumentParser(description='prepare_chinese_ref') parser.add_argument( '--file_name', required=False, type=str, default='./resources/chinese-demo.txt', help='file need process, same as training data in lm', ) parser.add_argument( '--ltp', required=False, type=str, default='./resources/ltp', help='resources for LTP tokenizer, usually a path', ) parser.add_argument( '--bert', required=False, type=str, default='./resources/robert', help='resources for Bert tokenizer', ) parser.add_argument( '--save_path', required=False, type=str, default='./resources/ref.txt', help='path to save res', ) UpperCAmelCase : Any = parser.parse_args() main(args)
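# Worked sketch of the sub-symbol idea above (the file defines it under an
# obfuscated name): given LTP's word segmentation, every BERT piece that
# continues a multi-character Chinese word gets a "##" prefix so whole-word
# masking can later treat the word as one unit. Simplified re-implementation,
# scanning longest-first like the original.
def mark_subwords(bert_tokens, chinese_words):
    if not chinese_words:
        return list(bert_tokens)
    max_len = max(len(w) for w in chinese_words)
    out = list(bert_tokens)
    i = 0
    while i < len(out):
        matched = False
        for length in range(min(len(out) - i, max_len), 1, -1):
            if "".join(out[i : i + length]) in chinese_words:
                for j in range(i + 1, i + length):
                    out[j] = "##" + out[j]   # mark continuation pieces
                i += length
                matched = True
                break
        if not matched:
            i += 1
    return out


print(mark_subwords(["中", "国", "人"], {"中国"}))  # ['中', '##国', '人']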
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Dict = RobertaPreLayerNormConfig.from_pretrained( _UpperCamelCase , architectures=["""RobertaPreLayerNormForMaskedLM"""] ) # convert state_dict __UpperCAmelCase : Optional[Any] = torch.load(hf_hub_download(repo_id=_UpperCamelCase , filename="""pytorch_model.bin""" ) ) __UpperCAmelCase : Optional[int] = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith("""roberta.""" ): __UpperCAmelCase : Optional[int] = """roberta_prelayernorm.""" + tensor_key[len("""roberta.""" ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith(""".self.LayerNorm.weight""" ) or tensor_key.endswith(""".self.LayerNorm.bias""" ): continue __UpperCAmelCase : int = tensor_value __UpperCAmelCase : Any = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=_UpperCamelCase , config=_UpperCamelCase , state_dict=_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) # convert tokenizer __UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(_UpperCamelCase ) tokenizer.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint-repo', default=None, type=str, required=True, help='Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) UpperCAmelCase : str = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
1
"""simple docstring""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging UpperCAmelCase : str = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = {'vocab_file': 'spiece.model'} UpperCAmelCase : str = { 'vocab_file': { 'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model', 'google/bigbird-roberta-large': ( 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model' ), 'google/bigbird-base-trivia-itc': ( 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model' ), } } UpperCAmelCase : Dict = { 'google/bigbird-roberta-base': 4096, 'google/bigbird-roberta-large': 4096, 'google/bigbird-base-trivia-itc': 4096, } class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = ["""input_ids""", """attention_mask"""] __a = [] def __init__( self : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Any="<unk>" , UpperCamelCase : List[str]="<s>" , UpperCamelCase : str="</s>" , UpperCamelCase : Any="<pad>" , UpperCamelCase : Optional[Any]="[SEP]" , UpperCamelCase : Tuple="[MASK]" , UpperCamelCase : Dict="[CLS]" , UpperCamelCase : Optional[Dict[str, Any]] = None , **UpperCamelCase : Tuple , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token __UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token __UpperCAmelCase : Any = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else unk_token __UpperCAmelCase : List[str] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token __UpperCAmelCase : int = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token __UpperCAmelCase : List[Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it __UpperCAmelCase : Tuple = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token __UpperCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , pad_token=UpperCamelCase , sep_token=UpperCamelCase , mask_token=UpperCamelCase , cls_token=UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase , ) __UpperCAmelCase : Tuple = vocab_file __UpperCAmelCase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(UpperCamelCase ) @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.sp_model.get_piece_size() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : str = {self.convert_ids_to_tokens(UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.__dict__.copy() __UpperCAmelCase : Union[str, Any] = None return state def __setstate__( self : Any , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __UpperCAmelCase : Optional[int] = {} __UpperCAmelCase : Tuple = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def lowerCamelCase__ ( self : str , UpperCamelCase : str ): '''simple docstring''' return self.sp_model.encode(UpperCamelCase , out_type=UpperCamelCase ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Tuple ): '''simple docstring''' return self.sp_model.piece_to_id(UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.sp_model.IdToPiece(UpperCamelCase ) return token def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Dict = [] __UpperCAmelCase : Union[str, Any] = """""" __UpperCAmelCase : int = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(UpperCamelCase ) + token __UpperCAmelCase : List[Any] = True __UpperCAmelCase : str = [] else: current_sub_tokens.append(UpperCamelCase ) __UpperCAmelCase : Dict = False out_string += self.sp_model.decode(UpperCamelCase ) return out_string.strip() def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : bool = False , UpperCamelCase : bool = None , UpperCamelCase : bool = True , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = kwargs.pop("""use_source_tokenizer""" , UpperCamelCase ) __UpperCAmelCase : str = self.convert_ids_to_tokens(UpperCamelCase , skip_special_tokens=UpperCamelCase ) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 __UpperCAmelCase : int = [] __UpperCAmelCase : List[str] = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase ) ) __UpperCAmelCase : Optional[Any] = [] sub_texts.append(UpperCamelCase ) else: current_sub_text.append(UpperCamelCase ) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(UpperCamelCase ) ) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: __UpperCAmelCase : List[Any] = re.sub(R""" (\[(MASK|SEP)\])""" , R"""\1""" , """ """.join(UpperCamelCase ) ) else: __UpperCAmelCase : Optional[int] = """""".join(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = ( clean_up_tokenization_spaces if clean_up_tokenization_spaces is not None else self.clean_up_tokenization_spaces ) if clean_up_tokenization_spaces: __UpperCAmelCase : Tuple = self.clean_up_tokenization(UpperCamelCase ) return clean_text else: return text def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : int = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase , """wb""" ) as fi: __UpperCAmelCase : Dict = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase ) return (out_vocab_file,) def lowerCamelCase__ ( self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : int = [self.cls_token_id] __UpperCAmelCase : Optional[Any] = [self.sep_token_id] return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : int , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' __UpperCAmelCase : Any = [self.sep_token_id] __UpperCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
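
# Added note (not in the original): the three helpers above implement the
# BERT-style special-token layout. For a pair of sequences A and B the model
# input is `[CLS] A [SEP] B [SEP]`; the special-tokens mask marks exactly the
# [CLS]/[SEP] positions, and the token type ids are 0 over `[CLS] A [SEP]`
# and 1 over `B [SEP]`.
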
320
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class lowerCamelCase__ : """simple docstring""" def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[int] , UpperCamelCase : List[str] , UpperCamelCase : str ): '''simple docstring''' return None class lowerCamelCase__ : """simple docstring""" def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' return None class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = [ # (model_name, model_kwargs) ("""bert-base-cased""", {}), ("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def lowerCamelCase__ ( self : int ): '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCamelCase , """tf""" , 12 , **UpperCamelCase ) @require_torch @slow def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(UpperCamelCase , """pt""" , 12 , **UpperCamelCase ) @require_torch @slow def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' from transformers import BertModel __UpperCAmelCase : str = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""] with NamedTemporaryFile(mode="""w+t""" ) as vocab_file: vocab_file.write("""\n""".join(UpperCamelCase ) ) vocab_file.flush() __UpperCAmelCase : Union[str, Any] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: __UpperCAmelCase : Optional[int] = BertModel(BertConfig(vocab_size=len(UpperCamelCase ) ) ) model.save_pretrained(UpperCamelCase ) self._test_export(UpperCamelCase , """pt""" , 12 , UpperCamelCase ) @require_tf @slow def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: __UpperCAmelCase : Dict = self._test_export(UpperCamelCase , """tf""" , 12 , **UpperCamelCase ) __UpperCAmelCase : int = quantize(Path(UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCamelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) @require_torch @slow def lowerCamelCase__ ( self : Any ): '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: __UpperCAmelCase : List[Any] = self._test_export(UpperCamelCase , """pt""" , 12 , **UpperCamelCase ) __UpperCAmelCase : Dict = quantize(UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(UpperCamelCase ).stat().st_size: self.fail("""Quantized model is bigger than initial ONNX model""" ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Any=None , **UpperCamelCase : List[str] ): '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: __UpperCAmelCase : int = 
Path(UpperCamelCase ).joinpath("""model.onnx""" ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ) return path except Exception as e: self.fail(UpperCamelCase ) @require_torch @require_tokenizers @slow def lowerCamelCase__ ( self : Any ): '''simple docstring''' from transformers import BertModel __UpperCAmelCase : List[str] = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) __UpperCAmelCase : Tuple = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(UpperCamelCase , UpperCamelCase , """pt""" ) @require_tf @require_tokenizers @slow def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' from transformers import TFBertModel __UpperCAmelCase : Union[str, Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) ) __UpperCAmelCase : Dict = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" ) self._test_infer_dynamic_axis(UpperCamelCase , UpperCamelCase , """tf""" ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = FeatureExtractionPipeline(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""] __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : str = infer_shapes(UpperCamelCase , UpperCamelCase ) # Assert all variables are present self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] , UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] , UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} ) self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = ["""input_ids""", """attention_mask""", """token_type_ids"""] __UpperCAmelCase : Optional[int] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]} __UpperCAmelCase ,__UpperCAmelCase : Tuple = ensure_valid_input(FuncContiguousArgs() , UpperCamelCase , UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(UpperCamelCase ) , 3 ) # Should have exactly the same input names self.assertEqual(set(UpperCamelCase ) , set(UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(UpperCamelCase , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() , UpperCamelCase , UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(UpperCamelCase ) , 1 
) self.assertEqual(len(UpperCamelCase ) , 1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] , tokens["""input_ids"""] ) self.assertEqual(ordered_input_names[0] , """input_ids""" ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" ) self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import XLMRobertaTokenizerFast from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = KandinskyImgaImgPipeline __a = ["""prompt""", """image_embeds""", """negative_image_embeds""", """image"""] __a = [ """prompt""", """negative_prompt""", """image_embeds""", """negative_image_embeds""", """image""", ] __a = [ """generator""", """height""", """width""", """strength""", """guidance_scale""", """negative_prompt""", """num_inference_steps""", """return_dict""", """guidance_scale""", """num_images_per_prompt""", """output_type""", """return_dict""", ] __a = False @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return self.time_input_dim @property def lowerCamelCase__ ( self : str ): '''simple docstring''' return self.time_input_dim * 4 @property def lowerCamelCase__ ( self : str ): '''simple docstring''' return 100 @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Dict = XLMRobertaTokenizerFast.from_pretrained("""YiYiXu/tiny-random-mclip-base""" ) return tokenizer @property def lowerCamelCase__ ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Dict = MCLIPConfig( numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_005 , ) __UpperCAmelCase : List[Any] = MultilingualCLIP(UpperCamelCase ) __UpperCAmelCase : Any = text_encoder.eval() return text_encoder @property def lowerCamelCase__ ( self : str ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Optional[Any] = { """in_channels""": 4, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """text_image""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """text_image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } __UpperCAmelCase : Tuple = UNetaDConditionModel(**UpperCamelCase ) return model @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return { "block_out_channels": [32, 64], "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"], "in_channels": 3, "latent_channels": 4, 
"layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": [ "AttnUpDecoderBlock2D", "UpDecoderBlock2D", ], "vq_embed_dim": 4, } @property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : Dict = VQModel(**self.dummy_movq_kwargs ) return model def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.dummy_text_encoder __UpperCAmelCase : int = self.dummy_tokenizer __UpperCAmelCase : Tuple = self.dummy_unet __UpperCAmelCase : Union[str, Any] = self.dummy_movq __UpperCAmelCase : Optional[Any] = { """num_train_timesteps""": 1_000, """beta_schedule""": """linear""", """beta_start""": 0.00085, """beta_end""": 0.012, """clip_sample""": False, """set_alpha_to_one""": False, """steps_offset""": 0, """prediction_type""": """epsilon""", """thresholding""": False, } __UpperCAmelCase : str = DDIMScheduler(**UpperCamelCase ) __UpperCAmelCase : Optional[Any] = { """text_encoder""": text_encoder, """tokenizer""": tokenizer, """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def lowerCamelCase__ ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple=0 ): '''simple docstring''' __UpperCAmelCase : Any = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __UpperCAmelCase : Optional[int] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(UpperCamelCase ) # create init_image __UpperCAmelCase : int = floats_tensor((1, 3, 64, 64) , rng=random.Random(UpperCamelCase ) ).to(UpperCamelCase ) __UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0] __UpperCAmelCase : Dict = Image.fromarray(np.uinta(UpperCamelCase ) ).convert("""RGB""" ).resize((256, 256) ) if str(UpperCamelCase ).startswith("""mps""" ): __UpperCAmelCase : List[Any] = torch.manual_seed(UpperCamelCase ) else: __UpperCAmelCase : Optional[Any] = torch.Generator(device=UpperCamelCase ).manual_seed(UpperCamelCase ) __UpperCAmelCase : List[str] = { """prompt""": """horse""", """image""": init_image, """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """generator""": generator, """height""": 64, """width""": 64, """num_inference_steps""": 10, """guidance_scale""": 7.0, """strength""": 0.2, """output_type""": """np""", } return inputs def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Tuple = """cpu""" __UpperCAmelCase : Optional[Any] = self.get_dummy_components() __UpperCAmelCase : Optional[Any] = self.pipeline_class(**UpperCamelCase ) __UpperCAmelCase : Optional[Any] = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = pipe(**self.get_dummy_inputs(UpperCamelCase ) ) __UpperCAmelCase : Dict = output.images __UpperCAmelCase : List[str] = pipe( **self.get_dummy_inputs(UpperCamelCase ) , return_dict=UpperCamelCase , )[0] __UpperCAmelCase : List[str] = image[0, -3:, -3:, -1] __UpperCAmelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) __UpperCAmelCase : int = np.array( [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice 
).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Any = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/kandinsky_img2img_frog.npy""" ) __UpperCAmelCase : Union[str, Any] = load_image( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/kandinsky/cat.png""" ) __UpperCAmelCase : Optional[int] = """A red cartoon frog, 4k""" __UpperCAmelCase : Dict = KandinskyPriorPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1-prior""" , torch_dtype=torch.floataa ) pipe_prior.to(UpperCamelCase ) __UpperCAmelCase : Dict = KandinskyImgaImgPipeline.from_pretrained( """kandinsky-community/kandinsky-2-1""" , torch_dtype=torch.floataa ) __UpperCAmelCase : Dict = pipeline.to(UpperCamelCase ) pipeline.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : List[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 ) __UpperCAmelCase ,__UpperCAmelCase : Dict = pipe_prior( UpperCamelCase , generator=UpperCamelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple() __UpperCAmelCase : int = pipeline( UpperCamelCase , image=UpperCamelCase , image_embeds=UpperCamelCase , negative_image_embeds=UpperCamelCase , generator=UpperCamelCase , num_inference_steps=100 , height=768 , width=768 , strength=0.2 , output_type="""np""" , ) __UpperCAmelCase : Optional[int] = output.images[0] assert image.shape == (768, 768, 3) assert_mean_pixel_difference(UpperCamelCase , UpperCamelCase )
320
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 5_0_0_0_0_0_0_0 ) -> int: '''simple docstring''' __UpperCAmelCase : Tuple = set() __UpperCAmelCase : int = int((limit - 2_4) ** (1 / 2) ) __UpperCAmelCase : Optional[Any] = set(range(3 , prime_square_limit + 1 , 2 ) ) primes.add(2 ) for p in range(3 , prime_square_limit + 1 , 2 ): if p not in primes: continue primes.difference_update(set(range(p * p , prime_square_limit + 1 , _UpperCamelCase ) ) ) for primea in primes: __UpperCAmelCase : List[str] = primea * primea for primea in primes: __UpperCAmelCase : str = primea * primea * primea if square + cube >= limit - 1_6: break for primea in primes: __UpperCAmelCase : Tuple = primea * primea * primea * primea __UpperCAmelCase : Dict = square + cube + tetr if total >= limit: break ret.add(_UpperCamelCase ) return len(_UpperCamelCase ) if __name__ == "__main__": print(F"{solution() = }")
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str ) -> bool: '''simple docstring''' __UpperCAmelCase : List[str] = get_failure_array(_UpperCamelCase ) # 2) Step through text searching for pattern __UpperCAmelCase ,__UpperCAmelCase : List[Any] = 0, 0 # index into text, pattern while i < len(_UpperCamelCase ): if pattern[j] == text[i]: if j == (len(_UpperCamelCase ) - 1): return True j += 1 # if this is a prefix in our pattern # just go back far enough to continue elif j > 0: __UpperCAmelCase : Union[str, Any] = failure[j - 1] continue i += 1 return False def lowerCamelCase ( _UpperCamelCase : str ) -> list[int]: '''simple docstring''' __UpperCAmelCase : List[str] = [0] __UpperCAmelCase : int = 0 __UpperCAmelCase : Optional[int] = 1 while j < len(_UpperCamelCase ): if pattern[i] == pattern[j]: i += 1 elif i > 0: __UpperCAmelCase : List[str] = failure[i - 1] continue j += 1 failure.append(_UpperCamelCase ) return failure if __name__ == "__main__": # Test 1) UpperCAmelCase : Tuple = 'abc1abc12' UpperCAmelCase : Dict = 'alskfjaldsabc1abc1abc12k23adsfabcabc' UpperCAmelCase : str = 'alskfjaldsk23adsfabcabc' assert kmp(pattern, texta) and not kmp(pattern, texta) # Test 2) UpperCAmelCase : Optional[int] = 'ABABX' UpperCAmelCase : List[str] = 'ABABZABABYABABX' assert kmp(pattern, text) # Test 3) UpperCAmelCase : Union[str, Any] = 'AAAB' UpperCAmelCase : str = 'ABAAAAAB' assert kmp(pattern, text) # Test 4) UpperCAmelCase : Any = 'abcdabcy' UpperCAmelCase : int = 'abcxabcdabxabcdabcdabcy' assert kmp(pattern, text) # Test 5) UpperCAmelCase : List[Any] = 'aabaabaaa' assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
320
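The search above stops at the first occurrence. A small variant, assuming the `kmp`/`get_failure_array` definitions above, that keeps scanning and returns every (possibly overlapping) match position:

```python
def kmp_all_matches(pattern: str, text: str) -> list[int]:
    """Return the start index of every occurrence of ``pattern`` in ``text``."""
    failure = get_failure_array(pattern)
    matches = []
    i = j = 0
    while i < len(text):
        if pattern[j] == text[i]:
            if j == len(pattern) - 1:
                matches.append(i - j)  # full match ends at i
                j = failure[j]         # keep going so overlaps are found
            else:
                j += 1
        elif j > 0:
            j = failure[j - 1]         # fall back in the pattern only
            continue
        i += 1
    return matches


assert kmp_all_matches("aa", "aaa") == [0, 1]
```

Because `i` never moves backwards and `j` only falls back along the failure array, the whole scan stays O(len(text) + len(pattern)).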
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
1
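The `_get_compatibles` classmethod above turns a list of class-name strings into actual classes by resolving each name against the top-level package. A reduced, self-contained sketch of that registry idea; the scheduler classes here are illustrative stand-ins, not the diffusers internals.

```python
# Sketch of the "compatibles" lookup: gather the class's own name plus its
# declared _compatibles, then resolve each name to a class if it exists.
import importlib


class SchedulerBase:
    _compatibles: list = []

    @classmethod
    def compatibles(cls):
        names = sorted(set([cls.__name__] + cls._compatibles))
        # diffusers resolves against its own top-level package; this sketch
        # resolves against whatever module it lives in.
        package = importlib.import_module(__name__.split(".")[0])
        return [getattr(package, n) for n in names if hasattr(package, n)]


class SchedulerA(SchedulerBase):
    _compatibles = ["SchedulerB"]


class SchedulerB(SchedulerBase):
    _compatibles = ["SchedulerA"]


print(SchedulerA.compatibles())  # [SchedulerA, SchedulerB]
```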
"""simple docstring""" import argparse import torch from safetensors.torch import load_file from diffusers import StableDiffusionPipeline def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : str , _UpperCamelCase : int , _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> List[str]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = StableDiffusionPipeline.from_pretrained(_UpperCamelCase , torch_dtype=torch.floataa ) # load LoRA weight from .safetensors __UpperCAmelCase : Any = load_file(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [] # directly update weight in diffusers model for key in state_dict: # it is suggested to print out the key, it usually will be something like below # "lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight" # as we have set the alpha beforehand, so just skip if ".alpha" in key or key in visited: continue if "text" in key: __UpperCAmelCase : List[str] = key.split(""".""" )[0].split(LORA_PREFIX_TEXT_ENCODER + """_""" )[-1].split("""_""" ) __UpperCAmelCase : Dict = pipeline.text_encoder else: __UpperCAmelCase : Union[str, Any] = key.split(""".""" )[0].split(LORA_PREFIX_UNET + """_""" )[-1].split("""_""" ) __UpperCAmelCase : int = pipeline.unet # find the target layer __UpperCAmelCase : Any = layer_infos.pop(0 ) while len(_UpperCamelCase ) > -1: try: __UpperCAmelCase : Optional[int] = curr_layer.__getattr__(_UpperCamelCase ) if len(_UpperCamelCase ) > 0: __UpperCAmelCase : Union[str, Any] = layer_infos.pop(0 ) elif len(_UpperCamelCase ) == 0: break except Exception: if len(_UpperCamelCase ) > 0: temp_name += "_" + layer_infos.pop(0 ) else: __UpperCAmelCase : Optional[int] = layer_infos.pop(0 ) __UpperCAmelCase : Dict = [] if "lora_down" in key: pair_keys.append(key.replace("""lora_down""" , """lora_up""" ) ) pair_keys.append(_UpperCamelCase ) else: pair_keys.append(_UpperCamelCase ) pair_keys.append(key.replace("""lora_up""" , """lora_down""" ) ) # update weight if len(state_dict[pair_keys[0]].shape ) == 4: __UpperCAmelCase : List[Any] = state_dict[pair_keys[0]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) __UpperCAmelCase : Any = state_dict[pair_keys[1]].squeeze(3 ).squeeze(2 ).to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_UpperCamelCase , _UpperCamelCase ).unsqueeze(2 ).unsqueeze(3 ) else: __UpperCAmelCase : Any = state_dict[pair_keys[0]].to(torch.floataa ) __UpperCAmelCase : Dict = state_dict[pair_keys[1]].to(torch.floataa ) curr_layer.weight.data += alpha * torch.mm(_UpperCamelCase , _UpperCamelCase ) # update visited list for item in pair_keys: visited.append(_UpperCamelCase ) return pipeline if __name__ == "__main__": UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( '--base_model_path', default=None, type=str, required=True, help='Path to the base model in diffusers format.' ) parser.add_argument( '--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.' 
) parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.') parser.add_argument( '--lora_prefix_unet', default='lora_unet', type=str, help='The prefix of UNet weight in safetensors' ) parser.add_argument( '--lora_prefix_text_encoder', default='lora_te', type=str, help='The prefix of text encoder weight in safetensors', ) parser.add_argument('--alpha', default=0.75, type=float, help='The merging ratio in W = W0 + alpha * deltaW') parser.add_argument( '--to_safetensors', action='store_true', help='Whether to store pipeline in safetensors format or not.' ) parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)') UpperCAmelCase : str = parser.parse_args() UpperCAmelCase : Optional[Any] = args.base_model_path UpperCAmelCase : Any = args.checkpoint_path UpperCAmelCase : Tuple = args.dump_path UpperCAmelCase : Dict = args.lora_prefix_unet UpperCAmelCase : List[Any] = args.lora_prefix_text_encoder UpperCAmelCase : Union[str, Any] = args.alpha UpperCAmelCase : List[str] = convert(base_model_path, checkpoint_path, lora_prefix_unet, lora_prefix_text_encoder, alpha) UpperCAmelCase : Optional[Any] = pipe.to(args.device) pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
320
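The update rule the conversion script applies is W ← W0 + α · (lora_up @ lora_down), computed in float32 per paired key. A minimal sketch of that merge for one linear layer; the shapes and the helper name are illustrative assumptions.

```python
import torch


def merge_lora_pair(weight: torch.Tensor,
                    lora_up: torch.Tensor,
                    lora_down: torch.Tensor,
                    alpha: float = 0.75) -> torch.Tensor:
    """Return the merged weight for a single linear layer.

    lora_up: (out_features, rank), lora_down: (rank, in_features).
    """
    return weight + alpha * torch.mm(lora_up.to(torch.float32),
                                     lora_down.to(torch.float32))


w = torch.zeros(4, 8)
up, down = torch.randn(4, 2), torch.randn(2, 8)
merged = merge_lora_pair(w, up, down, alpha=0.5)
assert merged.shape == w.shape
```

For conv weights with 1x1 spatial kernels, the script squeezes the trailing dims before the matmul and unsqueezes them back afterwards, as the four-dimensional branch above shows.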
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
1
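The test compares segmentation masks by a truncated content hash instead of full arrays, which keeps the expected values short while still catching any pixel change. A sketch of that fingerprinting trick (the original hashes a PIL image's bytes via MD5; a NumPy array works the same way):

```python
import hashlib

import numpy as np


def short_hash(arr: np.ndarray) -> str:
    """Stable 10-character fingerprint of an array's raw bytes."""
    return hashlib.md5(arr.tobytes()).hexdigest()[:10]


mask = np.zeros((480, 640), dtype=np.uint8)
print(short_hash(mask), mask.shape)  # identical data -> identical fingerprint
```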
"""simple docstring""" from collections import OrderedDict from typing import Any, List, Mapping, Optional from ... import PreTrainedTokenizer, TensorType, is_torch_available from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfigWithPast, PatchingSpec from ...utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'EleutherAI/gpt-j-6B': 'https://huggingface.co/EleutherAI/gpt-j-6B/resolve/main/config.json', # See all GPT-J models at https://huggingface.co/models?filter=gpt_j } class lowerCamelCase__ ( A ): """simple docstring""" __a = """gptj""" __a = { """max_position_embeddings""": """n_positions""", """hidden_size""": """n_embd""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Tuple , UpperCamelCase : Dict=50_400 , UpperCamelCase : List[str]=2_048 , UpperCamelCase : Tuple=4_096 , UpperCamelCase : Union[str, Any]=28 , UpperCamelCase : List[Any]=16 , UpperCamelCase : str=64 , UpperCamelCase : int=None , UpperCamelCase : Any="gelu_new" , UpperCamelCase : int=0.0 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : int=0.0 , UpperCamelCase : str=1e-5 , UpperCamelCase : Union[str, Any]=0.02 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Any=50_256 , UpperCamelCase : Dict=50_256 , UpperCamelCase : Dict=False , **UpperCamelCase : str , ): '''simple docstring''' __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Dict = n_positions __UpperCAmelCase : List[str] = n_embd __UpperCAmelCase : str = n_layer __UpperCAmelCase : List[str] = n_head __UpperCAmelCase : Tuple = n_inner __UpperCAmelCase : Any = rotary_dim __UpperCAmelCase : Optional[Any] = activation_function __UpperCAmelCase : Optional[Any] = resid_pdrop __UpperCAmelCase : Any = embd_pdrop __UpperCAmelCase : List[Any] = attn_pdrop __UpperCAmelCase : Optional[Any] = layer_norm_epsilon __UpperCAmelCase : Optional[Any] = initializer_range __UpperCAmelCase : str = use_cache __UpperCAmelCase : Tuple = bos_token_id __UpperCAmelCase : str = eos_token_id super().__init__( bos_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , tie_word_embeddings=UpperCamelCase , **UpperCamelCase ) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : PretrainedConfig , UpperCamelCase : str = "default" , UpperCamelCase : List[PatchingSpec] = None , UpperCamelCase : bool = False , ): '''simple docstring''' super().__init__(UpperCamelCase , task=UpperCamelCase , patching_specs=UpperCamelCase , use_past=UpperCamelCase ) if not getattr(self._config , """pad_token_id""" , UpperCamelCase ): # TODO: how to do that better? 
__UpperCAmelCase : int = 0 @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : str = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} ) if self.use_past: self.fill_with_past_key_values_(UpperCamelCase , direction="""inputs""" ) __UpperCAmelCase : int = {0: """batch""", 1: """past_sequence + sequence"""} else: __UpperCAmelCase : List[str] = {0: """batch""", 1: """sequence"""} return common_inputs @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self._config.n_layer @property def lowerCamelCase__ ( self : int ): '''simple docstring''' return self._config.n_head def lowerCamelCase__ ( self : int , UpperCamelCase : PreTrainedTokenizer , UpperCamelCase : int = -1 , UpperCamelCase : int = -1 , UpperCamelCase : bool = False , UpperCamelCase : Optional[TensorType] = None , ): '''simple docstring''' __UpperCAmelCase : Tuple = super(UpperCamelCase , self ).generate_dummy_inputs( UpperCamelCase , batch_size=UpperCamelCase , seq_length=UpperCamelCase , is_pair=UpperCamelCase , framework=UpperCamelCase ) # We need to order the input in the way they appears in the forward() __UpperCAmelCase : Dict = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} ) # Need to add the past_keys if self.use_past: if not is_torch_available(): raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" ) else: import torch __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = common_inputs["""input_ids"""].shape # Not using the same length for past_key_values __UpperCAmelCase : Union[str, Any] = seqlen + 2 __UpperCAmelCase : List[Any] = ( batch, self.num_attention_heads, past_key_values_length, self._config.hidden_size // self.num_attention_heads, ) __UpperCAmelCase : Union[str, Any] = [ (torch.zeros(UpperCamelCase ), torch.zeros(UpperCamelCase )) for _ in range(self.num_layers ) ] __UpperCAmelCase : Union[str, Any] = common_inputs["""attention_mask"""] if self.use_past: __UpperCAmelCase : List[Any] = ordered_inputs["""attention_mask"""].dtype __UpperCAmelCase : List[Any] = torch.cat( [ordered_inputs["""attention_mask"""], torch.ones(UpperCamelCase , UpperCamelCase , dtype=UpperCamelCase )] , dim=1 ) return ordered_inputs @property def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return 13
320
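`generate_dummy_inputs` above fabricates `past_key_values` as one (key, value) pair per layer with shape (batch, num_heads, past_len, hidden_size // num_heads), then widens the attention mask to cover past plus current tokens. A standalone sketch using the GPT-J defaults quoted in the config (n_layer=28, n_head=16, n_embd=4096); the concrete batch and sequence sizes are illustrative.

```python
import torch

batch, num_heads, head_dim, n_layer = 2, 16, 4096 // 16, 28
seqlen = 7
past_len = seqlen + 2  # the config deliberately uses a different past length

past_key_values = [
    (torch.zeros(batch, num_heads, past_len, head_dim),
     torch.zeros(batch, num_heads, past_len, head_dim))
    for _ in range(n_layer)
]

# The attention mask must then cover past + current positions:
attention_mask = torch.ones(batch, seqlen)
attention_mask = torch.cat([attention_mask, torch.ones(batch, past_len)], dim=1)
assert attention_mask.shape == (batch, seqlen + past_len)
```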
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
320
1
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
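`prepare_pegasus_inputs_dict` above derives attention masks from the token ids: positions equal to the pad id are masked out, except that the first decoder position is always kept visible. A small NumPy-only sketch of that construction; the pad id and token ids are illustrative.

```python
import numpy as np

pad_token_id = 0
input_ids = np.array([[5, 6, 7, 0, 0]])
attention_mask = np.not_equal(input_ids, pad_token_id).astype(np.int8)

decoder_input_ids = np.array([[0, 5, 6, 0]])
decoder_attention_mask = np.concatenate(
    [
        np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),  # force position 0 on
        np.not_equal(decoder_input_ids[:, 1:], pad_token_id).astype(np.int8),
    ],
    axis=-1,
)
print(attention_mask)          # [[1 1 1 0 0]]
print(decoder_attention_mask)  # [[1 1 1 0]]
```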
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
1
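Every block in this `__init__` repeats the same optional-dependency guard: probe the backend, raise the sentinel exception if it is missing, and fall back to dummy placeholder objects in the `except` branch. A compact runnable sketch of the guard; the probe helper is an assumption, not the actual diffusers utility.

```python
import importlib.util


class OptionalDependencyNotAvailable(Exception):
    pass


def is_backend_available(name: str) -> bool:
    """Cheap availability probe: is the package importable at all?"""
    return importlib.util.find_spec(name) is not None


backend_ok = True
try:
    if not is_backend_available("torchsde"):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    backend_ok = False  # the real init imports dummy_*_objects placeholders here

print("torchsde importable:", backend_ok)
```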
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCAmelCase : List[str] = { 'configuration_x_clip': [ 'XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XCLIPConfig', 'XCLIPTextConfig', 'XCLIPVisionConfig', ], 'processing_x_clip': ['XCLIPProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = [ 'XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST', 'XCLIPModel', 'XCLIPPreTrainedModel', 'XCLIPTextModel', 'XCLIPVisionModel', ] if TYPE_CHECKING: from .configuration_x_clip import ( XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, XCLIPConfig, XCLIPTextConfig, XCLIPVisionConfig, ) from .processing_x_clip import XCLIPProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_x_clip import ( XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST, XCLIPModel, XCLIPPreTrainedModel, XCLIPTextModel, XCLIPVisionModel, ) else: import sys UpperCAmelCase : str = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
1
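Usage sketch for the minimum-partition function above (restored here as `find_min`, assuming the definition above is in scope): it returns the smallest achievable |sum(A) - sum(B)| over all two-way splits of the input.

```python
print(find_min([1, 6, 11, 5]))    # 1  -> {1, 5, 6} vs {11}
print(find_min([3, 1, 4, 2, 2]))  # 0  -> {3, 1, 2} vs {4, 2}
```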
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int ) -> bool: '''simple docstring''' return str(_UpperCamelCase ) == str(_UpperCamelCase )[::-1] def lowerCamelCase ( _UpperCamelCase : int ) -> int: '''simple docstring''' return int(_UpperCamelCase ) + int(str(_UpperCamelCase )[::-1] ) def lowerCamelCase ( _UpperCamelCase : int = 1_0_0_0_0 ) -> int: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [] for num in range(1 , _UpperCamelCase ): __UpperCAmelCase : Any = 0 __UpperCAmelCase : Dict = num while iterations < 5_0: __UpperCAmelCase : Optional[int] = sum_reverse(_UpperCamelCase ) iterations += 1 if is_palindrome(_UpperCamelCase ): break else: lychrel_nums.append(_UpperCamelCase ) return len(_UpperCamelCase ) if __name__ == "__main__": print(F"{solution() = }")
320
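A worked example of the reverse-and-add loop above: 349 needs three iterations to reach a palindrome, so it is not a Lychrel candidate. With the default limit of 10 000, `solution()` returns 249, the Project Euler 55 answer.

```python
n = 349
for _ in range(3):
    n = n + int(str(n)[::-1])
    print(n)  # 1292, then 4213, then 7337 -- a palindrome
```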
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
320
1
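`preprocess` above applies, in order: convert to RGB, resize (shortest edge), center crop, rescale by 1/255, normalize with the OpenAI CLIP statistics, and reorder to channels-first. A NumPy sketch of the numeric steps; the mean/std values are the usual OpenAI CLIP constants and the input is dummy data.

```python
import numpy as np

# Assumed constants: the standard OpenAI CLIP normalization statistics.
CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # cropped HWC
pixels = image.astype(np.float32) * (1 / 255)  # do_rescale
pixels = (pixels - CLIP_MEAN) / CLIP_STD       # do_normalize
pixels = pixels.transpose(2, 0, 1)             # ChannelDimension.FIRST
print(pixels.shape)                            # (3, 224, 224)
```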
"""simple docstring""" import pickle import shutil import tempfile import unittest from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin UpperCAmelCase : Optional[int] = get_tests_dir('fixtures/test_sentencepiece.model') @require_sentencepiece @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = XGLMTokenizer __a = XGLMTokenizerFast __a = True __a = True def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' super().setUp() # We have a SentencePiece fixture for testing __UpperCAmelCase : List[str] = XGLMTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) tokenizer.save_pretrained(self.tmpdirname ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Dict = """<pad>""" __UpperCAmelCase : List[Any] = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(UpperCamelCase ) , UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(UpperCamelCase ) , UpperCamelCase ) def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<s>""" ) self.assertEqual(vocab_keys[1] , """<pad>""" ) self.assertEqual(len(UpperCamelCase ) , 1_008 ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.assertEqual(self.get_tokenizer().vocab_size , 1_008 ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = XGLMTokenizer(UpperCamelCase , keep_accents=UpperCamelCase ) __UpperCAmelCase : int = tokenizer.tokenize("""This is a test""" ) self.assertListEqual(UpperCamelCase , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(UpperCamelCase ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , ) __UpperCAmelCase : int = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" ) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """.""", ] , ) __UpperCAmelCase : Tuple = tokenizer.convert_tokens_to_ids(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4] ] , ) __UpperCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(UpperCamelCase ) self.assertListEqual( UpperCamelCase , [ SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """.""", ] , ) @cached_property def lowerCamelCase__ ( self : Any ): '''simple docstring''' return XGLMTokenizer.from_pretrained("""facebook/xglm-564M""" ) def lowerCamelCase__ ( self : Dict ): '''simple 
docstring''' with tempfile.NamedTemporaryFile() as f: shutil.copyfile(UpperCamelCase , f.name ) __UpperCAmelCase : str = XGLMTokenizer(f.name , keep_accents=UpperCamelCase ) __UpperCAmelCase : Any = pickle.dumps(UpperCamelCase ) pickle.loads(UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' if not self.test_rust_tokenizer: return __UpperCAmelCase : Dict = self.get_tokenizer() __UpperCAmelCase : Optional[Any] = self.get_rust_tokenizer() __UpperCAmelCase : List[Any] = """I was born in 92000, and this is falsé.""" __UpperCAmelCase : str = tokenizer.tokenize(UpperCamelCase ) __UpperCAmelCase : List[str] = rust_tokenizer.tokenize(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(UpperCamelCase , add_special_tokens=UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = self.get_rust_tokenizer() __UpperCAmelCase : List[Any] = tokenizer.encode(UpperCamelCase ) __UpperCAmelCase : Optional[Any] = rust_tokenizer.encode(UpperCamelCase ) self.assertListEqual(UpperCamelCase , UpperCamelCase ) @slow def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = """Hello World!""" __UpperCAmelCase : Optional[Any] = [2, 31_227, 4_447, 35] self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) ) @slow def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : int = ( """This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will""" """ add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth""" ) # fmt: off __UpperCAmelCase : Optional[Any] = [2, 1_018, 67, 11, 1_988, 2_617, 5_631, 278, 11, 3_407, 48, 71_630, 28_085, 4, 3_234, 157, 13, 6, 5, 6, 4, 3_526, 768, 15, 659, 57, 298, 3_983, 864, 129, 21, 6, 5, 13_675, 377, 652, 7_580, 10_341, 155, 2_817, 422, 1_666, 7, 1_674, 53, 113, 202_277, 17_892, 33, 60, 87, 4, 3_234, 157, 61, 2_667, 52_376, 19, 88, 23, 735] # fmt: on self.assertListEqual(UpperCamelCase , self.big_tokenizer.encode(UpperCamelCase ) ) @slow def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = { """input_ids""": [[2, 108_825, 1_163, 15, 88_010, 473, 15_898, 157, 13_672, 1_857, 312, 8, 238_021, 1_163, 53, 13_672, 1_857, 312, 8, 53_283, 182_396, 8, 18_566, 16, 36_733, 4_101, 8, 230, 244_017, 122_553, 7, 15, 132_597, 4, 293, 12_511, 7_610, 4, 3_414, 132_597, 9, 4, 32_361, 362, 4, 734, 28_512, 32_569, 18, 4, 32_361, 26_096, 14_982, 73, 18_715, 21_433, 235_261, 15, 492, 12_427, 16, 53, 18_715, 21_433, 65_454, 15, 23_659, 563, 16, 278, 597, 2_843, 595, 7_931, 182_396, 64_186, 22, 886, 595, 132_981, 53, 25_540, 3_449, 43_982, 39_901, 5_951, 878, 330, 4, 27_694, 80_269, 312, 53, 6_517, 11_780, 611, 20_408, 5], [2, 6, 132_597, 67, 42_897, 33, 592, 8, 163_729, 25_540, 361, 136_997, 109_514, 173_230, 7, 501, 60, 102_913, 196, 5_631, 235, 63_243, 473, 6, 231_757, 74, 5_277, 7_905, 53, 3_095, 37_317, 22, 454, 183_874, 5], [2, 268, 31_298, 46_530, 6, 132_935, 43_831, 7, 597, 32, 24, 3_688, 9_865, 5]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] } # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=UpperCamelCase , model_name="""facebook/xglm-564M""" , padding=UpperCamelCase , )
320
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
1
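Horner's rule above folds the coefficients from the highest degree down, so an n-term polynomial costs n multiplications and n additions with no explicit powers. A minimal sketch checking it against direct power-sum evaluation (the literals here are just the sample polynomial from the module above):

import math
from collections.abc import Sequence

def horner(poly: Sequence[float], x: float) -> float:
    # Fold from the highest-degree coefficient down: (((c_n*x + c_{n-1})*x + ...)*x) + c_0
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result

poly, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
assert math.isclose(horner(poly, x), 5.0 * x**2 + 9.3 * x**3 + 7.0 * x**4)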
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
1
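The EfficientNet conversion above hinges on weight-layout differences between the two frameworks: Keras stores Conv2D kernels as (kH, kW, in, out) and depthwise kernels as (kH, kW, in, multiplier), while torch.nn.Conv2d expects (out, in, kH, kW); dense kernels are simply transposed. A shape-only sketch of those permutations, with made-up channel counts for illustration:

import numpy as np
import torch

# Keras Conv2D kernel: (kernel_h, kernel_w, in_channels, out_channels)
tf_kernel = np.zeros((3, 3, 32, 64), dtype=np.float32)
pt_weight = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)
assert pt_weight.shape == (64, 32, 3, 3)  # torch Conv2d: (out, in, kH, kW)

# Keras DepthwiseConv2D kernel: (kernel_h, kernel_w, in_channels, depth_multiplier)
tf_dw = np.zeros((3, 3, 32, 1), dtype=np.float32)
pt_dw = torch.from_numpy(tf_dw).permute(2, 3, 0, 1)
assert pt_dw.shape == (32, 1, 3, 3)  # torch depthwise Conv2d with groups=in_channels

# Keras Dense kernel: (in_features, out_features) -> torch Linear weight: (out, in)
tf_dense = np.zeros((1280, 1000), dtype=np.float32)
pt_dense = torch.from_numpy(np.transpose(tf_dense))
assert pt_dense.shape == (1000, 1280)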
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class lowerCamelCase__ ( datasets.BuilderConfig ): """simple docstring""" __a = None class lowerCamelCase__ ( datasets.ArrowBasedBuilder ): """simple docstring""" __a = PandasConfig def lowerCamelCase__ ( self : Any ): '''simple docstring''' return datasets.DatasetInfo(features=self.config.features ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Union[str, Any] ): '''simple docstring''' if not self.config.data_files: raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' ) __UpperCAmelCase : str = dl_manager.download_and_extract(self.config.data_files ) if isinstance(UpperCamelCase , (str, list, tuple) ): __UpperCAmelCase : Union[str, Any] = data_files if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[Any] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __UpperCAmelCase : Optional[Any] = [dl_manager.iter_files(UpperCamelCase ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"""files""": files} )] __UpperCAmelCase : Dict = [] for split_name, files in data_files.items(): if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : str = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive __UpperCAmelCase : str = [dl_manager.iter_files(UpperCamelCase ) for file in files] splits.append(datasets.SplitGenerator(name=UpperCamelCase , gen_kwargs={"""files""": files} ) ) return splits def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : pa.Table ): '''simple docstring''' if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example __UpperCAmelCase : Optional[int] = table_cast(UpperCamelCase , self.config.features.arrow_schema ) return pa_table def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[Any] ): '''simple docstring''' for i, file in enumerate(itertools.chain.from_iterable(UpperCamelCase ) ): with open(UpperCamelCase , """rb""" ) as f: __UpperCAmelCase : Any = pa.Table.from_pandas(pd.read_pickle(UpperCamelCase ) ) yield i, self._cast_table(UpperCamelCase )
320
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
1
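The pandas builder above turns each pickled DataFrame into an Arrow table with pa.Table.from_pandas before yielding it. A standalone sketch of that per-file step (the temporary file and toy columns are hypothetical):

import os
import tempfile

import pandas as pd
import pyarrow as pa

df = pd.DataFrame({"text": ["a", "b"], "label": [0, 1]})
path = os.path.join(tempfile.mkdtemp(), "example.pkl")
df.to_pickle(path)

# Same per-file step as the builder's table generation: unpickle, then convert to Arrow.
with open(path, "rb") as f:
    table = pa.Table.from_pandas(pd.read_pickle(f))

assert table.num_rows == 2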
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available UpperCAmelCase : List[str] = { 'configuration_efficientnet': [ 'EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EfficientNetConfig', 'EfficientNetOnnxConfig', ] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['EfficientNetImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = [ 'EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'EfficientNetForImageClassification', 'EfficientNetModel', 'EfficientNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_efficientnet import ( EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientNetConfig, EfficientNetOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_efficientnet import EfficientNetImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_efficientnet import ( EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST, EfficientNetForImageClassification, EfficientNetModel, EfficientNetPreTrainedModel, ) else: import sys UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure)
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
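The EfficientNet __init__ above wires a name-to-submodule table into _LazyModule so the heavy torch/vision imports only happen on first attribute access. A toy sketch of the same deferral idea using PEP 562's module-level __getattr__, meant to live in a package __init__.py (transformers' _LazyModule is more elaborate; this only illustrates the mechanism):

import importlib

# Hypothetical mapping: submodule name -> public names it exports.
_IMPORT_STRUCTURE = {
    "configuration_efficientnet": ["EfficientNetConfig"],
}
_NAME_TO_MODULE = {name: mod for mod, names in _IMPORT_STRUCTURE.items() for name in names}


def __getattr__(name):
    # Resolve the attribute on first access, importing the submodule lazily.
    if name in _NAME_TO_MODULE:
        module = importlib.import_module("." + _NAME_TO_MODULE[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")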
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : Tuple , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Dict = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : Tuple = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : List[Any] = do_center_crop __UpperCAmelCase : Any = crop_size __UpperCAmelCase : int = do_rescale __UpperCAmelCase : Optional[int] = rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[str] = do_convert_rgb def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : List[str] = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : str , ): '''simple docstring''' __UpperCAmelCase : List[str] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : int , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : str , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : str , ): '''simple docstring''' __UpperCAmelCase : List[Any] = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Tuple = size if size is not None else self.size __UpperCAmelCase : str = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Any = resample if resample is not None else self.resample __UpperCAmelCase : Optional[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : Tuple = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Any = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : List[Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Dict = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : List[str] = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Optional[Any] = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : int = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : Optional[int] = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Any = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : str = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : List[Any] = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Any = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Tuple = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Union[str, Any] = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : str = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
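Since the hand-rolled codec above implements standard Base64, it can be cross-checked against the stdlib. A short sketch, assuming the base64_encode/base64_decode names from the module above are in scope:

import base64 as stdlib_base64

data = b"technical editor"
encoded = base64_encode(data)  # hand-rolled encoder from the module above
assert encoded == stdlib_base64.b64encode(data)
assert base64_decode(encoded) == stdlib_base64.b64decode(encoded) == data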
"""simple docstring""" import unittest from huggingface_hub import hf_hub_download from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor from transformers.pipelines import VideoClassificationPipeline, pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_decord, require_tf, require_torch, require_torch_or_tf, require_vision, ) from .test_pipelines_common import ANY @is_pipeline_test @require_torch_or_tf @require_vision @require_decord class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Any = hf_hub_download( repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) __UpperCAmelCase : Optional[Any] = VideoClassificationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase , top_k=2 ) __UpperCAmelCase : Any = [ example_video_filepath, """https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4""", ] return video_classifier, examples def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : Dict ): '''simple docstring''' for example in examples: __UpperCAmelCase : Tuple = video_classifier(UpperCamelCase ) self.assertEqual( UpperCamelCase , [ {"""score""": ANY(UpperCamelCase ), """label""": ANY(UpperCamelCase )}, {"""score""": ANY(UpperCamelCase ), """label""": ANY(UpperCamelCase )}, ] , ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Tuple = """hf-internal-testing/tiny-random-VideoMAEForVideoClassification""" __UpperCAmelCase : Any = VideoMAEFeatureExtractor( size={"""shortest_edge""": 10} , crop_size={"""height""": 10, """width""": 10} ) __UpperCAmelCase : List[str] = pipeline( """video-classification""" , model=UpperCamelCase , feature_extractor=UpperCamelCase , frame_sampling_rate=4 ) __UpperCAmelCase : int = hf_hub_download(repo_id="""nateraw/video-demo""" , filename="""archery.mp4""" , repo_type="""dataset""" ) __UpperCAmelCase : Optional[int] = video_classifier(UpperCamelCase , top_k=2 ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}] , ) __UpperCAmelCase : Any = video_classifier( [ video_file_path, video_file_path, ] , top_k=2 , ) self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}], [{"""score""": 0.5199, """label""": """LABEL_0"""}, {"""score""": 0.4801, """label""": """LABEL_1"""}], ] , ) @require_tf def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' pass
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
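The pipeline test above drives the whole flow end to end; stripped of the test harness, the same call sequence looks roughly like this (the tiny checkpoint and demo clip are the ones named in the test; running it needs decord for frame decoding):

from huggingface_hub import hf_hub_download
from transformers import pipeline

classifier = pipeline(
    "video-classification",
    model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
)
video_path = hf_hub_download(repo_id="nateraw/video-demo", filename="archery.mp4", repo_type="dataset")
predictions = classifier(video_path, top_k=2)
print(predictions)  # e.g. [{"score": 0.52, "label": "LABEL_0"}, {"score": 0.48, "label": "LABEL_1"}]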
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices UpperCAmelCase : Any = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'shi-labs/nat-mini-in1k-224': 'https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json', # See all Nat models at https://huggingface.co/models?filter=nat } class lowerCamelCase__ ( A , A ): """simple docstring""" __a = """nat""" __a = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : Optional[int] , UpperCamelCase : Optional[Any]=4 , UpperCamelCase : str=3 , UpperCamelCase : str=64 , UpperCamelCase : Optional[int]=[3, 4, 6, 5] , UpperCamelCase : List[str]=[2, 4, 8, 16] , UpperCamelCase : Dict=7 , UpperCamelCase : str=3.0 , UpperCamelCase : Tuple=True , UpperCamelCase : int=0.0 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : Dict="gelu" , UpperCamelCase : str=0.02 , UpperCamelCase : int=1e-5 , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : Dict=None , UpperCamelCase : Optional[Any]=None , **UpperCamelCase : Optional[int] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : int = patch_size __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : List[Any] = embed_dim __UpperCAmelCase : List[Any] = depths __UpperCAmelCase : List[str] = len(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = num_heads __UpperCAmelCase : List[str] = kernel_size __UpperCAmelCase : Optional[int] = mlp_ratio __UpperCAmelCase : List[Any] = qkv_bias __UpperCAmelCase : Tuple = hidden_dropout_prob __UpperCAmelCase : str = attention_probs_dropout_prob __UpperCAmelCase : Optional[int] = drop_path_rate __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : List[Any] = layer_norm_eps __UpperCAmelCase : Optional[Any] = initializer_range # we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCAmelCase : Dict = int(embed_dim * 2 ** (len(UpperCamelCase ) - 1) ) __UpperCAmelCase : Dict = layer_scale_init_value __UpperCAmelCase : Dict = ["""stem"""] + [f'''stage{idx}''' for idx in range(1 , len(UpperCamelCase ) + 1 )] __UpperCAmelCase ,__UpperCAmelCase : List[str] = get_aligned_output_features_output_indices( out_features=UpperCamelCase , out_indices=UpperCamelCase , stage_names=self.stage_names )
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
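The configuration snippet above derives most attributes from a few arguments; this short sketch shows how hidden_size falls out of embed_dim and the stage count, using the same default values (a sanity sketch, assuming a transformers install with NAT support):

from transformers import NatConfig

config = NatConfig(embed_dim=64, depths=[3, 4, 6, 5], num_heads=[2, 4, 8, 16])
# hidden_size is set to embed_dim * 2 ** (num_stages - 1), here 64 * 2**3
assert config.hidden_size == 512
assert config.num_hidden_layers == len(config.depths)  # mapped via attribute_map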
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) UpperCAmelCase : Dict = { 'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = ['MobileViTFeatureExtractor'] UpperCAmelCase : Union[str, Any] = ['MobileViTImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'MobileViTForImageClassification', 'MobileViTForSemanticSegmentation', 'MobileViTModel', 'MobileViTPreTrainedModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFMobileViTForImageClassification', 'TFMobileViTForSemanticSegmentation', 'TFMobileViTModel', 'TFMobileViTPreTrainedModel', ] if TYPE_CHECKING: from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilevit import MobileViTFeatureExtractor from .image_processing_mobilevit import MobileViTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilevit import ( MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel, MobileViTPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_mobilevit import ( TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileViTForImageClassification, TFMobileViTForSemanticSegmentation, TFMobileViTModel, TFMobileViTPreTrainedModel, ) else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
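The MobileViT module above builds an _import_structure dict and hands it to _LazyModule; here is a self-contained sketch of that deferred-import pattern (an illustrative reimplementation, not transformers' actual class):

import importlib
from types import ModuleType


class LazyModule(ModuleType):
    """Defer importing submodules until one of their exported symbols is touched."""

    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # Map every exported symbol back to the submodule that defines it.
        self._symbol_to_module = {
            symbol: module
            for module, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr: str):
        if attr not in self._symbol_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._symbol_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache: __getattr__ fires at most once per symbol
        return value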
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCamelCase__ : """simple docstring""" def __init__( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : Any=sys.maxsize ): '''simple docstring''' __UpperCAmelCase : int = """bilinear""" __UpperCAmelCase : Tuple = max_size __UpperCAmelCase : Optional[Any] = short_edge_length def __call__( self : Optional[Any] , UpperCamelCase : Dict ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [] for img in imgs: __UpperCAmelCase ,__UpperCAmelCase : Tuple = img.shape[:2] # later: provide list and randomly choose index for resize __UpperCAmelCase : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img __UpperCAmelCase : int = size * 1.0 / min(UpperCamelCase , UpperCamelCase ) if h < w: __UpperCAmelCase ,__UpperCAmelCase : int = size, scale * w else: __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = scale * h, size if max(UpperCamelCase , UpperCamelCase ) > self.max_size: __UpperCAmelCase : Any = self.max_size * 1.0 / max(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : int = newh * scale __UpperCAmelCase : Any = neww * scale __UpperCAmelCase : Any = int(neww + 0.5 ) __UpperCAmelCase : Tuple = int(newh + 0.5 ) if img.dtype == np.uinta: __UpperCAmelCase : Optional[Any] = Image.fromarray(UpperCamelCase ) __UpperCAmelCase : Any = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) __UpperCAmelCase : str = np.asarray(UpperCamelCase ) else: __UpperCAmelCase : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw __UpperCAmelCase : int = nn.functional.interpolate( UpperCamelCase , (newh, neww) , mode=self.interp_method , align_corners=UpperCamelCase ).squeeze(0 ) img_augs.append(UpperCamelCase ) return img_augs class lowerCamelCase__ : """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : int = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) __UpperCAmelCase : Union[str, Any] = cfg.INPUT.FORMAT __UpperCAmelCase : Any = cfg.SIZE_DIVISIBILITY __UpperCAmelCase : str = cfg.PAD_VALUE __UpperCAmelCase : List[Any] = cfg.INPUT.MAX_SIZE_TEST __UpperCAmelCase : int = cfg.MODEL.DEVICE __UpperCAmelCase : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) __UpperCAmelCase : List[Any] = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) __UpperCAmelCase : str = lambda UpperCamelCase : (x - self.pixel_mean) / self.pixel_std def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = tuple(max(UpperCamelCase ) for s in zip(*[img.shape for img in images] ) ) __UpperCAmelCase : List[Any] = [im.shape[-2:] for im in images] __UpperCAmelCase : Union[str, Any] = [ nn.functional.pad( UpperCamelCase , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(UpperCamelCase , UpperCamelCase ) ] return torch.stack(UpperCamelCase ), torch.tensor(UpperCamelCase ) def __call__( self : Any , UpperCamelCase : List[Any] , UpperCamelCase : Any=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : str = [images] if 
single_image: assert len(UpperCamelCase ) == 1 for i in range(len(UpperCamelCase ) ): if isinstance(images[i] , torch.Tensor ): images.insert(UpperCamelCase , images.pop(UpperCamelCase ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( UpperCamelCase , torch.as_tensor(img_tensorize(images.pop(UpperCamelCase ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge __UpperCAmelCase : Any = torch.tensor([im.shape[:2] for im in images] ) __UpperCAmelCase : Dict = self.aug(UpperCamelCase ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic __UpperCAmelCase : Tuple = [self.normalizer(UpperCamelCase ) for x in images] # now pad them to do the following operations __UpperCAmelCase ,__UpperCAmelCase : List[str] = self.pad(UpperCamelCase ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad __UpperCAmelCase : int = torch.true_divide(UpperCamelCase , UpperCamelCase ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Optional[int] ) -> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : Tuple[int, int] ) -> Union[str, Any]: '''simple docstring''' assert torch.isfinite(_UpperCamelCase ).all(), "Box tensor contains infinite or NaN!" __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = box_size tensor[:, 0].clamp_(min=0 , max=_UpperCamelCase ) tensor[:, 1].clamp_(min=0 , max=_UpperCamelCase ) tensor[:, 2].clamp_(min=0 , max=_UpperCamelCase ) tensor[:, 3].clamp_(min=0 , max=_UpperCamelCase )
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
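The doubling trick above is the additive half of a well-known pair; its multiplicative twin, square-and-multiply modular exponentiation, follows the identical bit scan (this sketch is an addition for comparison, not part of the snippet):

def pow_mod(a: int, b: int, c: int) -> int:
    """Square-and-multiply: the exponentiation analogue of the doubling trick above."""
    res = 1
    a %= c
    while b > 0:
        if b & 1:
            res = (res * a) % c
        a = (a * a) % c
        b >>= 1
    return res


assert pow_mod(3, 13, 7) == pow(3, 13, 7)  # built-in three-argument pow as reference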
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , UpperCamelCase : Tuple , UpperCamelCase : List[str]=7 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Tuple=18 , UpperCamelCase : List[str]=30 , UpperCamelCase : Dict=400 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[Any]=True , UpperCamelCase : str=[0.5, 0.5, 0.5] , UpperCamelCase : str=[0.5, 0.5, 0.5] , ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = size if size is not None else {"""shortest_edge""": 18} __UpperCAmelCase : Tuple = crop_size if crop_size is not None else {"""height""": 18, """width""": 18} __UpperCAmelCase : Optional[Any] = parent __UpperCAmelCase : List[str] = batch_size __UpperCAmelCase : List[Any] = num_channels __UpperCAmelCase : List[str] = image_size __UpperCAmelCase : Any = min_resolution __UpperCAmelCase : str = max_resolution __UpperCAmelCase : List[str] = do_resize __UpperCAmelCase : Optional[int] = size __UpperCAmelCase : Dict = do_center_crop __UpperCAmelCase : Optional[Any] = crop_size __UpperCAmelCase : Dict = do_normalize __UpperCAmelCase : Dict = image_mean __UpperCAmelCase : List[Any] = image_std def lowerCamelCase__ ( self : str ): '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LevitImageProcessor if is_vision_available() else None def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = LevitImageProcessingTester(self ) @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_center_crop""" ) ) self.assertTrue(hasattr(UpperCamelCase , """size""" ) ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18} ) self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} ) __UpperCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42} ) self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} ) def 
lowerCamelCase__ ( self : str ): '''simple docstring''' pass def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __UpperCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input __UpperCAmelCase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched __UpperCAmelCase : str = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input __UpperCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched __UpperCAmelCase : int = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __UpperCAmelCase : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input __UpperCAmelCase : List[str] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , ) # Test batched __UpperCAmelCase : Optional[int] = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["""height"""], self.image_processor_tester.crop_size["""width"""], ) , )
320
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
1
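The test above reduces to resize, center-crop, normalize, yielding (batch, channels, height, width) tensors; a minimal usage sketch with the same tiny sizes (assumes torch and Pillow are installed):

import numpy as np
from PIL import Image
from transformers import LevitImageProcessor

processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
pixel_values = processor(image, return_tensors="pt").pixel_values
assert pixel_values.shape == (1, 3, 18, 18)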
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str ) -> str: '''simple docstring''' return " ".join(input_str.split()[::-1] ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
1
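A quick cross-check of the median helper above against the standard library (illustrative values):

from statistics import median

assert median_of_two_arrays([1, 3], [2]) == median([1, 2, 3])
assert median_of_two_arrays([-1, 5.5], [2, 7]) == median([-1, 2, 5.5, 7])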
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : int = { 'configuration_rembert': ['REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RemBertConfig', 'RemBertOnnxConfig'] } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : int = ['RemBertTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = ['RemBertTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'RemBertForCausalLM', 'RemBertForMaskedLM', 'RemBertForMultipleChoice', 'RemBertForQuestionAnswering', 'RemBertForSequenceClassification', 'RemBertForTokenClassification', 'RemBertLayer', 'RemBertModel', 'RemBertPreTrainedModel', 'load_tf_weights_in_rembert', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = [ 'TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFRemBertForCausalLM', 'TFRemBertForMaskedLM', 'TFRemBertForMultipleChoice', 'TFRemBertForQuestionAnswering', 'TFRemBertForSequenceClassification', 'TFRemBertForTokenClassification', 'TFRemBertLayer', 'TFRemBertModel', 'TFRemBertPreTrainedModel', ] if TYPE_CHECKING: from .configuration_rembert import REMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RemBertConfig, RemBertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert import RemBertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_rembert_fast import RemBertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_rembert import ( REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RemBertForCausalLM, RemBertForMaskedLM, RemBertForMultipleChoice, RemBertForQuestionAnswering, RemBertForSequenceClassification, RemBertForTokenClassification, RemBertLayer, RemBertModel, RemBertPreTrainedModel, load_tf_weights_in_rembert, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_rembert import ( TF_REMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFRemBertForCausalLM, TFRemBertForMaskedLM, TFRemBertForMultipleChoice, TFRemBertForQuestionAnswering, TFRemBertForSequenceClassification, TFRemBertForTokenClassification, TFRemBertLayer, TFRemBertModel, TFRemBertPreTrainedModel, ) else: import sys UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
1
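Outside the test harness, the BetterTransformer round trip exercised above reduces to a few calls (requires the optimum package; the output directory name is hypothetical):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_id = "hf-internal-testing/tiny-random-t5"  # checkpoint used by the test above
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

inputs = tokenizer("This is me", return_tensors="pt")
model = model.to_bettertransformer()       # swap in fused attention (needs optimum)
output = model.generate(**inputs)
model = model.reverse_bettertransformer()  # restore vanilla modules before saving
model.save_pretrained("saved-t5")          # hypothetical output directory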
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_rembert import RemBertTokenizer else: UpperCAmelCase : Tuple = None UpperCAmelCase : List[Any] = logging.get_logger(__name__) UpperCAmelCase : str = {'vocab_file': 'sentencepiece.model', 'tokenizer_file': 'tokenizer.json'} UpperCAmelCase : int = { 'vocab_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/sentencepiece.model', }, 'tokenizer_file': { 'google/rembert': 'https://huggingface.co/google/rembert/resolve/main/tokenizer.json', }, } UpperCAmelCase : List[str] = { 'google/rembert': 256, } UpperCAmelCase : List[Any] = '▁' class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = RemBertTokenizer def __init__( self : Optional[int] , UpperCamelCase : str=None , UpperCamelCase : Tuple=None , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : Optional[Any]=False , UpperCamelCase : Any="[CLS]" , UpperCamelCase : Optional[Any]="[SEP]" , UpperCamelCase : Optional[int]="<unk>" , UpperCamelCase : Any="[SEP]" , UpperCamelCase : Union[str, Any]="<pad>" , UpperCamelCase : Dict="[CLS]" , UpperCamelCase : Union[str, Any]="[MASK]" , **UpperCamelCase : Tuple , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token super().__init__( UpperCamelCase , tokenizer_file=UpperCamelCase , do_lower_case=UpperCamelCase , remove_space=UpperCamelCase , keep_accents=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , pad_token=UpperCamelCase , cls_token=UpperCamelCase , mask_token=UpperCamelCase , **UpperCamelCase , ) __UpperCAmelCase : Tuple = do_lower_case __UpperCAmelCase : List[str] = remove_space __UpperCAmelCase : Dict = keep_accents __UpperCAmelCase : List[Any] = vocab_file __UpperCAmelCase : Optional[int] = False if not self.vocab_file else True def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' __UpperCAmelCase : Tuple = [self.sep_token_id] __UpperCAmelCase : Tuple = [self.cls_token_id] if token_ids_a is None: return cls + token_ids_a + sep return cls + token_ids_a + sep + token_ids_a + sep def lowerCamelCase__ ( self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( """You should not supply a second sequence if the provided sequence of """ """ids is already formatted with special tokens for the model.""" ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(UpperCamelCase )) + [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1] def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [self.sep_token_id] __UpperCAmelCase : Any = 
[self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def lowerCamelCase__ ( self : List[str] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCamelCase ): logger.error("""Vocabulary path ({}) should be a directory""".format(UpperCamelCase ) ) return __UpperCAmelCase : int = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) return (out_vocab_file,)
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
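The tokenizer above wires sequence pairs as [CLS] A [SEP] B [SEP] with segment ids 0 and 1; the same bookkeeping, written out dependency-free with made-up token ids:

def build_inputs_with_special_tokens(ids_a, ids_b=None, cls_id=0, sep_id=1):
    """RemBERT-style encoding: [CLS] A [SEP] for one sequence, [CLS] A [SEP] B [SEP] for a pair."""
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id] + ids_b + [sep_id]


def create_token_type_ids(ids_a, ids_b=None):
    """Segment ids: 0 over [CLS] A [SEP], then 1 over B [SEP]."""
    first = len(ids_a) + 2  # [CLS] + A + [SEP]
    if ids_b is None:
        return [0] * first
    return [0] * first + [1] * (len(ids_b) + 1)


assert build_inputs_with_special_tokens([5, 6], [7]) == [0, 5, 6, 1, 7, 1]
assert create_token_type_ids([5, 6], [7]) == [0, 0, 0, 0, 1, 1]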
"""simple docstring""" import warnings from ...utils import logging from .image_processing_flava import FlavaImageProcessor UpperCAmelCase : Tuple = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Dict , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please""" """ use FlavaImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowerCamelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b def lowerCamelCase ( _UpperCamelCase : np.ndarray ) -> np.ndarray: '''simple docstring''' return (gray > 1_2_7) & (gray <= 2_5_5) def lowerCamelCase ( _UpperCamelCase : np.ndarray , _UpperCamelCase : np.ndarray ) -> np.ndarray: '''simple docstring''' __UpperCAmelCase : Optional[Any] = np.zeros_like(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image __UpperCAmelCase : int = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): __UpperCAmelCase : Any = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() __UpperCAmelCase : Optional[int] = int(summation > 0 ) return output if __name__ == "__main__": # read original image UpperCAmelCase : Any = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg' UpperCAmelCase : List[Any] = np.array(Image.open(lena_path)) # kernel to be applied UpperCAmelCase : Optional[int] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) UpperCAmelCase : int = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image UpperCAmelCase : Optional[int] = Image.fromarray(output).convert('RGB') pil_img.save('result_dilation.png')
320
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
1
"""simple docstring""" from sklearn.metrics import fa_score import datasets UpperCAmelCase : List[Any] = '\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n' UpperCAmelCase : List[Any] = '\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n\n - \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {\'f1\': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results[\'f1\'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric("f1")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results[\'f1\'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")\n >>> print(round(results[\'f1\'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")\n >>> print(round(results[\'f1\'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'f1\': array([0.8, 0. , 0. ])}\n' UpperCAmelCase : Optional[int] = '\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCamelCase__ ( datasets.Metric ): """simple docstring""" def lowerCamelCase__ ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""int32""" ) ), """references""": datasets.Sequence(datasets.Value("""int32""" ) ), } if self.config_name == """multilabel""" else { """predictions""": datasets.Value("""int32""" ), """references""": datasets.Value("""int32""" ), } ) , reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html"""] , ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[str] , UpperCamelCase : int , UpperCamelCase : List[str]=None , UpperCamelCase : Optional[int]=1 , UpperCamelCase : Union[str, Any]="binary" , UpperCamelCase : int=None ): '''simple docstring''' __UpperCAmelCase : Tuple = fa_score( UpperCamelCase , UpperCamelCase , labels=UpperCamelCase , pos_label=UpperCamelCase , average=UpperCamelCase , sample_weight=UpperCamelCase ) return {"f1": float(UpperCamelCase ) if score.size == 1 else score}
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import numpy as np def lowerCamelCase ( _UpperCamelCase : np.array ) -> np.array: '''simple docstring''' return 1 / (1 + np.exp(-vector )) def lowerCamelCase ( _UpperCamelCase : np.array ) -> np.array: '''simple docstring''' return vector * sigmoid(1.702 * vector ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
1
"""simple docstring""" from PIL import Image def lowerCamelCase ( _UpperCamelCase : Image , _UpperCamelCase : float ) -> Image: '''simple docstring''' def brightness(_UpperCamelCase : int ) -> float: return 1_2_8 + level + (c - 1_2_8) if not -255.0 <= level <= 255.0: raise ValueError("""level must be between -255.0 (black) and 255.0 (white)""" ) return img.point(_UpperCamelCase ) if __name__ == "__main__": # Load image with Image.open('image_data/lena.jpg') as img: # Change brightness to 100 UpperCAmelCase : Optional[int] = change_brightness(img, 100) brigt_img.save('image_data/lena_brightness.png', format='png')
320
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
1
"""simple docstring""" from __future__ import annotations import json import requests from bsa import BeautifulSoup from fake_useragent import UserAgent UpperCAmelCase : Optional[Any] = {'UserAgent': UserAgent().random} def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> dict: '''simple docstring''' __UpperCAmelCase : Optional[int] = script.contents[0] __UpperCAmelCase : Tuple = json.loads(data[data.find("""{\"config\"""" ) : -1] ) return info["entry_data"]["ProfilePage"][0]["graphql"]["user"] class lowerCamelCase__ : """simple docstring""" def __init__( self : str , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : List[str] = f'''https://www.instagram.com/{username}/''' __UpperCAmelCase : Optional[Any] = self.get_json() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : List[str] = requests.get(self.url , headers=UpperCamelCase ).text __UpperCAmelCase : Tuple = BeautifulSoup(UpperCamelCase , """html.parser""" ).find_all("""script""" ) try: return extract_user_profile(scripts[4] ) except (json.decoder.JSONDecodeError, KeyError): return extract_user_profile(scripts[3] ) def __repr__( self : str ): '''simple docstring''' return f'''{self.__class__.__name__}(\'{self.username}\')''' def __str__( self : int ): '''simple docstring''' return f'''{self.fullname} ({self.username}) is {self.biography}''' @property def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return self.user_data["username"] @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["full_name"] @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["biography"] @property def lowerCamelCase__ ( self : int ): '''simple docstring''' return self.user_data["business_email"] @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return self.user_data["external_url"] @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return self.user_data["edge_followed_by"]["count"] @property def lowerCamelCase__ ( self : int ): '''simple docstring''' return self.user_data["edge_follow"]["count"] @property def lowerCamelCase__ ( self : int ): '''simple docstring''' return self.user_data["edge_owner_to_timeline_media"]["count"] @property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return self.user_data["profile_pic_url_hd"] @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return self.user_data["is_verified"] @property def lowerCamelCase__ ( self : int ): '''simple docstring''' return self.user_data["is_private"] def lowerCamelCase ( _UpperCamelCase : str = "github" ) -> None: '''simple docstring''' import os if os.environ.get("""CI""" ): return # test failing on GitHub Actions __UpperCAmelCase : Optional[Any] = InstagramUser(_UpperCamelCase ) assert instagram_user.user_data assert isinstance(instagram_user.user_data , _UpperCamelCase ) assert instagram_user.username == username if username != "github": return assert instagram_user.fullname == "GitHub" assert instagram_user.biography == "Built for developers." 
assert instagram_user.number_of_posts > 1_5_0 assert instagram_user.number_of_followers > 1_2_0_0_0_0 assert instagram_user.number_of_followings > 1_5 assert instagram_user.email == "support@github.com" assert instagram_user.website == "https://github.com/readme" assert instagram_user.profile_picture_url.startswith("""https://instagram.""" ) assert instagram_user.is_verified is True assert instagram_user.is_private is False if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = InstagramUser('github') print(instagram_user) print(F"{instagram_user.number_of_posts = }") print(F"{instagram_user.number_of_followers = }") print(F"{instagram_user.number_of_followings = }") print(F"{instagram_user.email = }") print(F"{instagram_user.website = }") print(F"{instagram_user.profile_picture_url = }") print(F"{instagram_user.is_verified = }") print(F"{instagram_user.is_private = }")
320
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
320
1
"""simple docstring""" import math import sys def lowerCamelCase ( _UpperCamelCase : int ) -> int: '''simple docstring''' if number != int(_UpperCamelCase ): raise ValueError("""the value of input must be a natural number""" ) if number < 0: raise ValueError("""the value of input must not be a negative number""" ) if number == 0: return 1 __UpperCAmelCase : Optional[Any] = [-1] * (number + 1) __UpperCAmelCase : List[str] = 0 for i in range(1 , number + 1 ): __UpperCAmelCase : Union[str, Any] = sys.maxsize __UpperCAmelCase : Union[str, Any] = int(math.sqrt(_UpperCamelCase ) ) for j in range(1 , root + 1 ): __UpperCAmelCase : Dict = 1 + answers[i - (j**2)] __UpperCAmelCase : Optional[int] = min(_UpperCamelCase , _UpperCamelCase ) __UpperCAmelCase : Optional[int] = answer return answers[number] if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
1
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowerCamelCase ( _UpperCamelCase : str = "laptop" ) -> DataFrame: '''simple docstring''' __UpperCAmelCase : Dict = f'''https://www.amazon.in/laptop/s?k={product}''' __UpperCAmelCase : str = { """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36""", """Accept-Language""": """en-US, en;q=0.5""", } __UpperCAmelCase : Union[str, Any] = BeautifulSoup(requests.get(_UpperCamelCase , headers=_UpperCamelCase ).text ) # Initialize a Pandas dataframe with the column titles __UpperCAmelCase : str = DataFrame( columns=[ """Product Title""", """Product Link""", """Current Price of the product""", """Product Rating""", """MRP of the product""", """Discount""", ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( """div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""} ) , ): try: __UpperCAmelCase : Optional[int] = item.ha.text __UpperCAmelCase : Any = """https://www.amazon.in/""" + item.ha.a["""href"""] __UpperCAmelCase : str = item.find("""span""" , attrs={"""class""": """a-offscreen"""} ).text try: __UpperCAmelCase : Any = item.find("""span""" , attrs={"""class""": """a-icon-alt"""} ).text except AttributeError: __UpperCAmelCase : Tuple = """Not available""" try: __UpperCAmelCase : int = ( """₹""" + item.find( """span""" , attrs={"""class""": """a-price a-text-price"""} ).text.split("""₹""" )[1] ) except AttributeError: __UpperCAmelCase : Tuple = """""" try: __UpperCAmelCase : List[str] = float( ( ( float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) ) - float(product_price.strip("""₹""" ).replace(""",""" , """""" ) ) ) / float(product_mrp.strip("""₹""" ).replace(""",""" , """""" ) ) ) * 1_0_0 ) except ValueError: __UpperCAmelCase : List[str] = float("""nan""" ) except AttributeError: pass __UpperCAmelCase : int = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] __UpperCAmelCase : Dict = """ """ __UpperCAmelCase : Any = """ """ data_frame.index += 1 return data_frame if __name__ == "__main__": UpperCAmelCase : List[str] = 'headphones' get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 1_0 , _UpperCamelCase : int = 2_2 ) -> int: '''simple docstring''' __UpperCAmelCase : Optional[int] = range(1 , _UpperCamelCase ) __UpperCAmelCase : List[Any] = range(1 , _UpperCamelCase ) return sum( 1 for power in powers for base in bases if len(str(base**power ) ) == power ) if __name__ == "__main__": print(F"{solution(10, 22) = }")
320
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
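A short standalone usage sketch of the processor above on a synthetic image, assuming the class is the one exposed by the transformers package:

import numpy as np
from PIL import Image
from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults: shortest edge 224, center crop 224x224
image = Image.fromarray(np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224)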
320
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'google/pix2struct-textcaps-base': ( 'https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json' ), } class lowerCamelCase__ ( A ): """simple docstring""" __a = """pix2struct_text_model""" __a = ["""past_key_values"""] __a = { """hidden_size""": """hidden_size""", """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : List[Any] , UpperCamelCase : Optional[Any]=50_244 , UpperCamelCase : List[Any]=768 , UpperCamelCase : Optional[Any]=64 , UpperCamelCase : Optional[Any]=2_048 , UpperCamelCase : Dict=12 , UpperCamelCase : Dict=12 , UpperCamelCase : List[Any]=32 , UpperCamelCase : Tuple=128 , UpperCamelCase : Union[str, Any]=0.1 , UpperCamelCase : Optional[Any]=1e-6 , UpperCamelCase : List[Any]=1.0 , UpperCamelCase : str="gelu_new" , UpperCamelCase : Union[str, Any]=0 , UpperCamelCase : Optional[int]=False , UpperCamelCase : Optional[int]=0 , UpperCamelCase : Optional[int]=1 , UpperCamelCase : List[Any]=False , UpperCamelCase : Optional[Any]=True , **UpperCamelCase : Union[str, Any] , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : List[str] = d_kv __UpperCAmelCase : Union[str, Any] = d_ff __UpperCAmelCase : List[Any] = num_layers __UpperCAmelCase : List[Any] = num_heads __UpperCAmelCase : Union[str, Any] = relative_attention_num_buckets __UpperCAmelCase : Optional[int] = relative_attention_max_distance __UpperCAmelCase : Tuple = dropout_rate __UpperCAmelCase : Dict = layer_norm_epsilon __UpperCAmelCase : Union[str, Any] = initializer_factor __UpperCAmelCase : int = use_cache __UpperCAmelCase : Optional[Any] = eos_token_id __UpperCAmelCase : Tuple = decoder_start_token_id # for backwards compatibility __UpperCAmelCase : Union[str, Any] = dense_act_fn super().__init__( pad_token_id=UpperCamelCase , eos_token_id=UpperCamelCase , decoder_start_token_id=UpperCamelCase , tie_word_embeddings=UpperCamelCase , is_decoder=UpperCamelCase , **UpperCamelCase , ) @classmethod def lowerCamelCase__ ( cls : Optional[int] , UpperCamelCase : Union[str, os.PathLike] , **UpperCamelCase : List[Any] ): '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : Any = cls.get_config_dict(UpperCamelCase , **UpperCamelCase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": __UpperCAmelCase : int = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase , **UpperCamelCase ) class lowerCamelCase__ ( A ): """simple docstring""" __a = """pix2struct_vision_model""" def __init__( self : Union[str, Any] , UpperCamelCase : Union[str, Any]=768 , UpperCamelCase : Optional[Any]=768 , UpperCamelCase : List[Any]=2_048 , UpperCamelCase : Tuple=64 , UpperCamelCase : Optional[int]=12 , UpperCamelCase : int=12 , UpperCamelCase : Any="gelu_new" , UpperCamelCase : List[Any]=1e-6 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : Dict=0.0 , UpperCamelCase : str=1e-1_0 , UpperCamelCase : Union[str, Any]=1.0 , UpperCamelCase : Union[str, Any]=4_096 , UpperCamelCase : Tuple=32 , UpperCamelCase : str=128 , **UpperCamelCase : List[str] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : List[str] = hidden_size __UpperCAmelCase : List[str] = patch_embed_hidden_size __UpperCAmelCase : Optional[Any] = d_ff __UpperCAmelCase : List[Any] = dropout_rate __UpperCAmelCase : List[str] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Optional[int] = initializer_factor __UpperCAmelCase : int = attention_dropout __UpperCAmelCase : Union[str, Any] = layer_norm_eps __UpperCAmelCase : List[Any] = dense_act_fn __UpperCAmelCase : Optional[Any] = seq_len __UpperCAmelCase : int = relative_attention_num_buckets __UpperCAmelCase : Dict = relative_attention_max_distance __UpperCAmelCase : List[Any] = d_kv @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , UpperCamelCase : Union[str, os.PathLike] , **UpperCamelCase : Optional[Any] ): '''simple docstring''' cls._set_token_in_kwargs(UpperCamelCase ) __UpperCAmelCase ,__UpperCAmelCase : List[str] = cls.get_config_dict(UpperCamelCase , **UpperCamelCase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": __UpperCAmelCase : Union[str, Any] = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase , **UpperCamelCase ) class lowerCamelCase__ ( A ): """simple docstring""" __a = """pix2struct""" __a = True def __init__( self : int , UpperCamelCase : Any=None , UpperCamelCase : Any=None , UpperCamelCase : Optional[Any]=1.0 , UpperCamelCase : Tuple=0.02 , UpperCamelCase : List[str]=False , UpperCamelCase : List[str]=False , UpperCamelCase : Optional[Any]=True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(tie_word_embeddings=UpperCamelCase , is_encoder_decoder=UpperCamelCase , **UpperCamelCase ) if text_config is None: __UpperCAmelCase : Optional[int] = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: __UpperCAmelCase : List[Any] = {} logger.info("""vision_config is None. 
Initializing the Pix2StructVisionConfig with default values.""" ) __UpperCAmelCase : Union[str, Any] = PixaStructTextConfig(**UpperCamelCase ) __UpperCAmelCase : str = PixaStructVisionConfig(**UpperCamelCase ) __UpperCAmelCase : Any = self.text_config.decoder_start_token_id __UpperCAmelCase : str = self.text_config.pad_token_id __UpperCAmelCase : Dict = self.text_config.eos_token_id __UpperCAmelCase : Optional[int] = initializer_factor __UpperCAmelCase : Tuple = initializer_range __UpperCAmelCase : Union[str, Any] = self.initializer_range __UpperCAmelCase : List[str] = self.initializer_range __UpperCAmelCase : Optional[int] = is_vqa @classmethod def lowerCamelCase__ ( cls : List[Any] , UpperCamelCase : PixaStructTextConfig , UpperCamelCase : PixaStructVisionConfig , **UpperCamelCase : List[Any] ): '''simple docstring''' return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Tuple = copy.deepcopy(self.__dict__ ) __UpperCAmelCase : Dict = self.text_config.to_dict() __UpperCAmelCase : List[Any] = self.vision_config.to_dict() __UpperCAmelCase : List[Any] = self.__class__.model_type return output
320
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
1
"""simple docstring""" import argparse import requests import torch from PIL import Image from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor def lowerCamelCase ( _UpperCamelCase : Any ) -> int: '''simple docstring''' if "cls_token" in name: __UpperCAmelCase : Dict = name.replace("""cls_token""" , """vit.embeddings.cls_token""" ) if "mask_token" in name: __UpperCAmelCase : Dict = name.replace("""mask_token""" , """decoder.mask_token""" ) if "decoder_pos_embed" in name: __UpperCAmelCase : int = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" ) if "pos_embed" in name and "decoder" not in name: __UpperCAmelCase : int = name.replace("""pos_embed""" , """vit.embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __UpperCAmelCase : List[str] = name.replace("""patch_embed.proj""" , """vit.embeddings.patch_embeddings.projection""" ) if "patch_embed.norm" in name: __UpperCAmelCase : Optional[int] = name.replace("""patch_embed.norm""" , """vit.embeddings.norm""" ) if "decoder_blocks" in name: __UpperCAmelCase : List[str] = name.replace("""decoder_blocks""" , """decoder.decoder_layers""" ) if "blocks" in name: __UpperCAmelCase : Any = name.replace("""blocks""" , """vit.encoder.layer""" ) if "attn.proj" in name: __UpperCAmelCase : Dict = name.replace("""attn.proj""" , """attention.output.dense""" ) if "attn" in name: __UpperCAmelCase : str = name.replace("""attn""" , """attention.self""" ) if "norm1" in name: __UpperCAmelCase : Optional[Any] = name.replace("""norm1""" , """layernorm_before""" ) if "norm2" in name: __UpperCAmelCase : Dict = name.replace("""norm2""" , """layernorm_after""" ) if "mlp.fc1" in name: __UpperCAmelCase : Tuple = name.replace("""mlp.fc1""" , """intermediate.dense""" ) if "mlp.fc2" in name: __UpperCAmelCase : Optional[Any] = name.replace("""mlp.fc2""" , """output.dense""" ) if "decoder_embed" in name: __UpperCAmelCase : List[Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" ) if "decoder_norm" in name: __UpperCAmelCase : Optional[Any] = name.replace("""decoder_norm""" , """decoder.decoder_norm""" ) if "decoder_pred" in name: __UpperCAmelCase : List[str] = name.replace("""decoder_pred""" , """decoder.decoder_pred""" ) if "norm.weight" in name and "decoder" not in name: __UpperCAmelCase : str = name.replace("""norm.weight""" , """vit.layernorm.weight""" ) if "norm.bias" in name and "decoder" not in name: __UpperCAmelCase : Tuple = name.replace("""norm.bias""" , """vit.layernorm.bias""" ) return name def lowerCamelCase ( _UpperCamelCase : Dict , _UpperCamelCase : List[str] ) -> Optional[int]: '''simple docstring''' for key in orig_state_dict.copy().keys(): __UpperCAmelCase : Optional[Any] = orig_state_dict.pop(_UpperCamelCase ) if "qkv" in key: __UpperCAmelCase : str = key.split(""".""" ) __UpperCAmelCase : Tuple = int(key_split[1] ) if "decoder_blocks" in key: __UpperCAmelCase : str = config.decoder_hidden_size __UpperCAmelCase : Dict = """decoder.decoder_layers.""" if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Tuple = val[dim : dim * 2, :] __UpperCAmelCase : Union[str, Any] = val[-dim:, :] elif "bias" in key: __UpperCAmelCase : Tuple = val[:dim] __UpperCAmelCase : Any = val[dim : dim * 2] __UpperCAmelCase : List[Any] = val[-dim:] else: __UpperCAmelCase : Tuple = config.hidden_size __UpperCAmelCase : Tuple = """vit.encoder.layer.""" if "weight" in key: __UpperCAmelCase : Any = val[:dim, :] __UpperCAmelCase : Union[str, Any] = val[dim : dim * 2, :] __UpperCAmelCase : 
int = val[-dim:, :] elif "bias" in key: __UpperCAmelCase : int = val[:dim] __UpperCAmelCase : List[Any] = val[dim : dim * 2] __UpperCAmelCase : Dict = val[-dim:] else: __UpperCAmelCase : List[Any] = val return orig_state_dict def lowerCamelCase ( _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : int = ViTMAEConfig() if "large" in checkpoint_url: __UpperCAmelCase : int = 1_0_2_4 __UpperCAmelCase : Dict = 4_0_9_6 __UpperCAmelCase : Optional[int] = 2_4 __UpperCAmelCase : int = 1_6 elif "huge" in checkpoint_url: __UpperCAmelCase : Tuple = 1_4 __UpperCAmelCase : Tuple = 1_2_8_0 __UpperCAmelCase : Optional[Any] = 5_1_2_0 __UpperCAmelCase : str = 3_2 __UpperCAmelCase : Union[str, Any] = 1_6 __UpperCAmelCase : List[str] = ViTMAEForPreTraining(_UpperCamelCase ) __UpperCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location="""cpu""" )["""model"""] __UpperCAmelCase : Union[str, Any] = ViTMAEImageProcessor(size=config.image_size ) __UpperCAmelCase : Tuple = convert_state_dict(_UpperCamelCase , _UpperCamelCase ) model.load_state_dict(_UpperCamelCase ) model.eval() __UpperCAmelCase : int = """https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg""" __UpperCAmelCase : Any = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) __UpperCAmelCase : Tuple = ViTMAEImageProcessor(size=config.image_size ) __UpperCAmelCase : Any = image_processor(images=_UpperCamelCase , return_tensors="""pt""" ) # forward pass torch.manual_seed(2 ) __UpperCAmelCase : int = model(**_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = outputs.logits if "large" in checkpoint_url: __UpperCAmelCase : Optional[int] = torch.tensor( [[-0.7_309, -0.7_128, -1.0_169], [-1.0_161, -0.9_058, -1.1_878], [-1.0_478, -0.9_411, -1.1_911]] ) elif "huge" in checkpoint_url: __UpperCAmelCase : Optional[Any] = torch.tensor( [[-1.1_599, -0.9_199, -1.2_221], [-1.1_952, -0.9_269, -1.2_307], [-1.2_143, -0.9_337, -1.2_262]] ) else: __UpperCAmelCase : Union[str, Any] = torch.tensor( [[-0.9_192, -0.8_481, -1.1_259], [-1.1_349, -1.0_034, -1.2_599], [-1.1_757, -1.0_429, -1.2_726]] ) # verify logits assert torch.allclose(logits[0, :3, :3] , _UpperCamelCase , atol=1E-4 ) print(f'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(_UpperCamelCase ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--checkpoint_url', default='https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth', type=str, help='URL of the checkpoint you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) UpperCAmelCase : str = parser.parse_args() convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
320
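The core of the convert_state_dict routine above is splitting each fused qkv projection from the original MAE checkpoint into separate query/key/value matrices. A minimal sketch of that slicing, using a made-up hidden size rather than values from a real checkpoint:

import torch

dim = 8                                  # hypothetical hidden size
qkv_weight = torch.randn(3 * dim, dim)   # fused projection as stored in MAE-style checkpoints

query = qkv_weight[:dim, :]              # first third  -> query projection
key = qkv_weight[dim : dim * 2, :]       # middle third -> key projection
value = qkv_weight[-dim:, :]             # last third   -> value projection

assert query.shape == key.shape == value.shape == (dim, dim)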
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
1
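The cache tests above repeatedly build decoder position ids with jnp.broadcast_to; a tiny standalone illustration of that idiom (shapes are arbitrary):

import jax.numpy as jnp

batch_size, seq_len = 2, 5
# Expand a single arange row to the whole batch without copying data.
position_ids = jnp.broadcast_to(jnp.arange(seq_len)[None, :], (batch_size, seq_len))
print(position_ids.shape)  # (2, 5)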
"""simple docstring""" import pytest from datasets.splits import SplitDict, SplitInfo from datasets.utils.py_utils import asdict @pytest.mark.parametrize( """split_dict""" , [ SplitDict(), SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_3_3_7 , num_examples=4_2 , dataset_name="""my_dataset""" )} ), SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1_3_3_7 , num_examples=4_2 )} ), SplitDict({"""train""": SplitInfo()} ), ] , ) def lowerCamelCase ( _UpperCamelCase : SplitDict ) -> Any: '''simple docstring''' __UpperCAmelCase : str = split_dict._to_yaml_list() assert len(_UpperCamelCase ) == len(_UpperCamelCase ) __UpperCAmelCase : Any = SplitDict._from_yaml_list(_UpperCamelCase ) for split_name, split_info in split_dict.items(): # dataset_name field is deprecated, and is therefore not part of the YAML dump __UpperCAmelCase : int = None # the split name of split_dict takes over the name of the split info object __UpperCAmelCase : Tuple = split_name assert split_dict == reloaded @pytest.mark.parametrize( """split_info""" , [SplitInfo(), SplitInfo(dataset_name=_UpperCamelCase ), SplitInfo(dataset_name="""my_dataset""" )] ) def lowerCamelCase ( _UpperCamelCase : int ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = asdict(SplitDict({"""train""": split_info} ) ) assert "dataset_name" in split_dict_asdict["train"] assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
320
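A usage sketch of the round-trip the test above exercises; note that _to_yaml_list/_from_yaml_list are private helpers of the datasets library, so this assumes the installed version matches the one the test targets:

from datasets.splits import SplitDict, SplitInfo

splits = SplitDict({"train": SplitInfo(name="train", num_bytes=1337, num_examples=42)})
# Serialize to the YAML-friendly list form and back again.
reloaded = SplitDict._from_yaml_list(splits._to_yaml_list())
print(reloaded["train"].num_examples)  # 42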
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
1
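The key tensor transformation in replace_params above is the axis permutation from TensorFlow's (H, W, C_in, C_out) convolution kernels to PyTorch's (C_out, C_in, H, W). A self-contained check with synthetic values:

import numpy as np
import torch

tf_kernel = np.random.rand(3, 3, 16, 32).astype(np.float32)  # (H, W, C_in, C_out)
pt_kernel = torch.from_numpy(tf_kernel).permute(3, 2, 0, 1)  # (C_out, C_in, H, W)
assert tuple(pt_kernel.shape) == (32, 16, 3, 3)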
"""simple docstring""" import inspect from typing import Callable, List, Optional, Union import torch from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers import DiffusionPipeline from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import logging UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , UpperCamelCase : AutoencoderKL , UpperCamelCase : CLIPTextModel , UpperCamelCase : CLIPTokenizer , UpperCamelCase : UNetaDConditionModel , UpperCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , UpperCamelCase : StableDiffusionSafetyChecker , UpperCamelCase : CLIPImageProcessor , ): '''simple docstring''' super().__init__() self.register_modules( vae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , unet=UpperCamelCase , scheduler=UpperCamelCase , safety_checker=UpperCamelCase , feature_extractor=UpperCamelCase , ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[Union[str, int]] = "auto" ): '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory __UpperCAmelCase : Dict = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(UpperCamelCase ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' self.enable_attention_slicing(UpperCamelCase ) @torch.no_grad() def __call__( self : Tuple , UpperCamelCase : Union[str, List[str]] , UpperCamelCase : int = 512 , UpperCamelCase : int = 512 , UpperCamelCase : int = 50 , UpperCamelCase : float = 7.5 , UpperCamelCase : Optional[Union[str, List[str]]] = None , UpperCamelCase : Optional[int] = 1 , UpperCamelCase : float = 0.0 , UpperCamelCase : Optional[torch.Generator] = None , UpperCamelCase : Optional[torch.FloatTensor] = None , UpperCamelCase : Optional[str] = "pil" , UpperCamelCase : bool = True , UpperCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , UpperCamelCase : int = 1 , UpperCamelCase : Optional[torch.FloatTensor] = None , **UpperCamelCase : Optional[int] , ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : List[str] = 1 elif isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) else: raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(UpperCamelCase )}''' ) if height % 8 != 0 or width % 8 != 0: raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' ) if (callback_steps is None) or ( callback_steps is not None and (not isinstance(UpperCamelCase , UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(UpperCamelCase )}.''' ) # get prompt text embeddings __UpperCAmelCase : List[Any] = self.tokenizer( UpperCamelCase , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , ) __UpperCAmelCase : int = text_inputs.input_ids if text_input_ids.shape[-1] > self.tokenizer.model_max_length: __UpperCAmelCase : 
Optional[Any] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] ) logger.warning( """The following part of your input was truncated because CLIP can only handle sequences up to""" f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' ) __UpperCAmelCase : Optional[int] = text_input_ids[:, : self.tokenizer.model_max_length] if text_embeddings is None: __UpperCAmelCase : Dict = self.text_encoder(text_input_ids.to(self.device ) )[0] # duplicate text embeddings for each generation per prompt, using mps friendly method __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = text_embeddings.shape __UpperCAmelCase : Any = text_embeddings.repeat(1 , UpperCamelCase , 1 ) __UpperCAmelCase : str = text_embeddings.view(bs_embed * num_images_per_prompt , UpperCamelCase , -1 ) # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. __UpperCAmelCase : List[Any] = guidance_scale > 1.0 # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: __UpperCAmelCase : List[str] if negative_prompt is None: __UpperCAmelCase : List[Any] = [""""""] elif type(UpperCamelCase ) is not type(UpperCamelCase ): raise TypeError( f'''`negative_prompt` should be the same type to `prompt`, but got {type(UpperCamelCase )} !=''' f''' {type(UpperCamelCase )}.''' ) elif isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Optional[int] = [negative_prompt] elif batch_size != len(UpperCamelCase ): raise ValueError( f'''`negative_prompt`: {negative_prompt} has batch size {len(UpperCamelCase )}, but `prompt`:''' f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches''' """ the batch size of `prompt`.""" ) else: __UpperCAmelCase : Dict = negative_prompt __UpperCAmelCase : Tuple = text_input_ids.shape[-1] __UpperCAmelCase : int = self.tokenizer( UpperCamelCase , padding="""max_length""" , max_length=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" , ) __UpperCAmelCase : Optional[Any] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method __UpperCAmelCase : Tuple = uncond_embeddings.shape[1] __UpperCAmelCase : Optional[Any] = uncond_embeddings.repeat(UpperCamelCase , UpperCamelCase , 1 ) __UpperCAmelCase : Optional[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , UpperCamelCase , -1 ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes __UpperCAmelCase : str = torch.cat([uncond_embeddings, text_embeddings] ) # get the initial random noise unless the user supplied it # Unlike in other pipelines, latents need to be generated in the target device # for 1-to-1 results reproducibility with the CompVis implementation. # However this currently doesn't work in `mps`. 
__UpperCAmelCase : Optional[int] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8) __UpperCAmelCase : Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64) __UpperCAmelCase : Optional[int] = text_embeddings.dtype if latents is None: if self.device.type == "mps": # randn does not exist on mps __UpperCAmelCase : List[Any] = torch.randn( UpperCamelCase , generator=UpperCamelCase , device="""cpu""" , dtype=UpperCamelCase ).to(self.device ) __UpperCAmelCase : Dict = torch.randn(UpperCamelCase , generator=UpperCamelCase , device="""cpu""" , dtype=UpperCamelCase ).to( self.device ) else: __UpperCAmelCase : Optional[int] = torch.randn( UpperCamelCase , generator=UpperCamelCase , device=self.device , dtype=UpperCamelCase ) __UpperCAmelCase : str = torch.randn(UpperCamelCase , generator=UpperCamelCase , device=self.device , dtype=UpperCamelCase ) else: if latents_reference.shape != latents_shape: raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' ) __UpperCAmelCase : List[str] = latents_reference.to(self.device ) __UpperCAmelCase : str = latents.to(self.device ) # This is the key part of the pipeline where we # try to ensure that the generated images w/ the same seed # but different sizes actually result in similar images __UpperCAmelCase : Any = (latents_shape[3] - latents_shape_reference[3]) // 2 __UpperCAmelCase : Dict = (latents_shape[2] - latents_shape_reference[2]) // 2 __UpperCAmelCase : List[Any] = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx __UpperCAmelCase : Optional[Any] = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy __UpperCAmelCase : Dict = 0 if dx < 0 else dx __UpperCAmelCase : Optional[Any] = 0 if dy < 0 else dy __UpperCAmelCase : Optional[Any] = max(-dx , 0 ) __UpperCAmelCase : int = max(-dy , 0 ) # import pdb # pdb.set_trace() __UpperCAmelCase : Dict = latents_reference[:, :, dy : dy + h, dx : dx + w] # set timesteps self.scheduler.set_timesteps(UpperCamelCase ) # Some schedulers like PNDM have timesteps as arrays # It's more optimized to move all timesteps to correct device beforehand __UpperCAmelCase : Any = self.scheduler.timesteps.to(self.device ) # scale the initial noise by the standard deviation required by the scheduler __UpperCAmelCase : List[str] = latents * self.scheduler.init_noise_sigma # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] __UpperCAmelCase : List[Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() ) __UpperCAmelCase : Dict = {} if accepts_eta: __UpperCAmelCase : int = eta for i, t in enumerate(self.progress_bar(UpperCamelCase ) ): # expand the latents if we are doing classifier free guidance __UpperCAmelCase : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __UpperCAmelCase : Optional[Any] = self.scheduler.scale_model_input(UpperCamelCase , UpperCamelCase ) # predict the noise residual __UpperCAmelCase : Optional[Any] = self.unet(UpperCamelCase , UpperCamelCase , encoder_hidden_states=UpperCamelCase ).sample # perform guidance if do_classifier_free_guidance: __UpperCAmelCase ,__UpperCAmelCase : List[str] = noise_pred.chunk(2 ) __UpperCAmelCase : int = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) # compute the previous noisy sample x_t -> x_t-1 __UpperCAmelCase : List[Any] = self.scheduler.step(UpperCamelCase , UpperCamelCase , UpperCamelCase , **UpperCamelCase ).prev_sample # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(UpperCamelCase , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = 1 / 0.18215 * latents __UpperCAmelCase : str = self.vae.decode(UpperCamelCase ).sample __UpperCAmelCase : Any = (image / 2 + 0.5).clamp(0 , 1 ) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16 __UpperCAmelCase : List[str] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if self.safety_checker is not None: __UpperCAmelCase : Union[str, Any] = self.feature_extractor(self.numpy_to_pil(UpperCamelCase ) , return_tensors="""pt""" ).to( self.device ) __UpperCAmelCase ,__UpperCAmelCase : str = self.safety_checker( images=UpperCamelCase , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) ) else: __UpperCAmelCase : Optional[int] = None if output_type == "pil": __UpperCAmelCase : Optional[Any] = self.numpy_to_pil(UpperCamelCase ) if not return_dict: return (image, has_nsfw_concept) return StableDiffusionPipelineOutput(images=UpperCamelCase , nsfw_content_detected=UpperCamelCase )
320
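The distinguishing trick of the pipeline above is centre-aligning a reference latent with the target latent so the same seed produces similar compositions at different resolutions. A simplified re-derivation of that crop/paste logic with toy shapes, not the pipeline's exact variable layout:

import torch

latents = torch.randn(1, 4, 64, 64)            # target-resolution noise
latents_reference = torch.randn(1, 4, 96, 96)  # reference-resolution noise, same seed

dx = (latents.shape[3] - latents_reference.shape[3]) // 2
dy = (latents.shape[2] - latents_reference.shape[2]) // 2
# Size of the overlapping window shared by both tensors.
w = latents_reference.shape[3] if dx >= 0 else latents_reference.shape[3] + 2 * dx
h = latents_reference.shape[2] if dy >= 0 else latents_reference.shape[2] + 2 * dy
tx, ty = max(-dx, 0), max(-dy, 0)  # offset into the reference
dx, dy = max(dx, 0), max(dy, 0)    # offset into the target
latents[:, :, dy : dy + h, dx : dx + w] = latents_reference[:, :, ty : ty + h, tx : tx + w]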
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
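The dummy object above follows transformers' missing-backend pattern: a placeholder class that raises a helpful error on instantiation. A self-contained imitation, with requires_backends re-implemented as a toy stand-in rather than the real helper:

def requires_backends(obj, backends):
    # Toy stand-in: always report the optional backends as missing.
    name = getattr(obj, "__name__", type(obj).__name__)
    raise ImportError(f"{name} requires the following backends: {', '.join(backends)}")

class KerasNLPPlaceholder:
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, self._backends)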
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 1_0_0_0 ) -> int: '''simple docstring''' __UpperCAmelCase : List[Any] = -1 __UpperCAmelCase : Optional[int] = 0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c __UpperCAmelCase : Optional[Any] = (n * n - 2 * a * n) // (2 * n - 2 * a) __UpperCAmelCase : List[Any] = n - a - b if c * c == (a * a + b * b): __UpperCAmelCase : Any = a * b * c if candidate >= product: __UpperCAmelCase : int = candidate return product if __name__ == "__main__": print(F"{solution() = }")
320
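A quick numeric check of the closed form the solution above relies on: substituting c = n - a - b into a^2 + b^2 = c^2 gives b = (n^2 - 2an) / (2n - 2a). For n = 1000 this recovers the classic (200, 375, 425) triplet:

n, a = 1000, 200
b = (n * n - 2 * a * n) // (2 * n - 2 * a)  # 375
c = n - a - b                               # 425
assert a * a + b * b == c * c and a + b + c == n
print(a * b * c)  # 31875000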
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
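The encoder above works by concatenating 8-bit byte representations and regrouping them into 6-bit indices into the charset. The classic "Man" -> "TWFu" example, checked against the standard library:

import base64

data = b"Man"
bits = "".join(bin(byte)[2:].zfill(8) for byte in data)  # 24 bits, no padding needed
charset = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
encoded = "".join(charset[int(bits[i : i + 6], 2)] for i in range(0, len(bits), 6))
assert encoded.encode() == base64.b64encode(data)  # both b"TWFu"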
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCAmelCase : int = logging.get_logger(__name__) UpperCAmelCase : Union[str, Any] = { 'vocab_file': 'vocab.txt', 'merges_file': 'bpe.codes', } UpperCAmelCase : Optional[int] = { 'vocab_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt', }, 'merges_file': { 'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes', 'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes', }, } UpperCAmelCase : Optional[int] = { 'vinai/phobert-base': 256, 'vinai/phobert-large': 256, } def lowerCamelCase ( _UpperCamelCase : List[str] ) -> str: '''simple docstring''' __UpperCAmelCase : Optional[Any] = set() __UpperCAmelCase : Optional[Any] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Optional[Any] = char __UpperCAmelCase : Union[str, Any] = set(_UpperCamelCase ) return pairs class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : Optional[Any] , UpperCamelCase : Any , UpperCamelCase : Dict , UpperCamelCase : Dict="<s>" , UpperCamelCase : Optional[int]="</s>" , UpperCamelCase : Dict="</s>" , UpperCamelCase : int="<s>" , UpperCamelCase : str="<unk>" , UpperCamelCase : Dict="<pad>" , UpperCamelCase : Optional[int]="<mask>" , **UpperCamelCase : str , ): '''simple docstring''' super().__init__( bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , **UpperCamelCase , ) __UpperCAmelCase : Optional[int] = vocab_file __UpperCAmelCase : Optional[int] = merges_file __UpperCAmelCase : Tuple = {} __UpperCAmelCase : Optional[Any] = 0 __UpperCAmelCase : List[Any] = 1 __UpperCAmelCase : Union[str, Any] = 2 __UpperCAmelCase : Optional[Any] = 3 self.add_from_file(UpperCamelCase ) __UpperCAmelCase : Tuple = {v: k for k, v in self.encoder.items()} with open(UpperCamelCase , encoding="""utf-8""" ) as merges_handle: __UpperCAmelCase : Any = merges_handle.read().split("""\n""" )[:-1] __UpperCAmelCase : str = [tuple(merge.split()[:-1] ) for merge in merges] __UpperCAmelCase : Optional[Any] = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Dict = {} def lowerCamelCase__ ( self : Any , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : Any = [self.cls_token_id] __UpperCAmelCase : List[Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1] def 
lowerCamelCase__ ( self : Dict , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' __UpperCAmelCase : int = [self.sep_token_id] __UpperCAmelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' return len(self.encoder ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] ): '''simple docstring''' if token in self.cache: return self.cache[token] __UpperCAmelCase : Tuple = tuple(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] ) __UpperCAmelCase : Tuple = get_pairs(UpperCamelCase ) if not pairs: return token while True: __UpperCAmelCase : List[Any] = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase ,__UpperCAmelCase : Tuple = bigram __UpperCAmelCase : Tuple = [] __UpperCAmelCase : Any = 0 while i < len(UpperCamelCase ): try: __UpperCAmelCase : List[str] = word.index(UpperCamelCase , UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Any = j if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : int = tuple(UpperCamelCase ) __UpperCAmelCase : Tuple = new_word if len(UpperCamelCase ) == 1: break else: __UpperCAmelCase : List[str] = get_pairs(UpperCamelCase ) __UpperCAmelCase : int = """@@ """.join(UpperCamelCase ) __UpperCAmelCase : Dict = word[:-4] __UpperCAmelCase : List[Any] = word return word def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : Any = re.findall(R"""\S+\n?""" , UpperCamelCase ) for token in words: split_tokens.extend(list(self.bpe(UpperCamelCase ).split(""" """ ) ) ) return split_tokens def lowerCamelCase__ ( self : Tuple , UpperCamelCase : int ): '''simple docstring''' return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self : str , UpperCamelCase : List[Any] ): '''simple docstring''' return self.decoder.get(UpperCamelCase , self.unk_token ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : int = """ """.join(UpperCamelCase ).replace("""@@ """ , """""" ).strip() return out_string def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : Union[str, Any] = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : str = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase ): copyfile(self.vocab_file , UpperCamelCase ) if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCamelCase ): 
copyfile(self.merges_file , UpperCamelCase ) return out_vocab_file, out_merge_file def lowerCamelCase__ ( self : Any , UpperCamelCase : Optional[int] ): '''simple docstring''' if isinstance(UpperCamelCase , UpperCamelCase ): try: with open(UpperCamelCase , """r""" , encoding="""utf-8""" ) as fd: self.add_from_file(UpperCamelCase ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'''Incorrect encoding detected in {f}, please rebuild the dataset''' ) return __UpperCAmelCase : List[Any] = f.readlines() for lineTmp in lines: __UpperCAmelCase : Any = lineTmp.strip() __UpperCAmelCase : Tuple = line.rfind(""" """ ) if idx == -1: raise ValueError("""Incorrect dictionary format, expected '<token> <cnt>'""" ) __UpperCAmelCase : Dict = line[:idx] __UpperCAmelCase : Any = len(self.encoder )
320
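The BPE merge loop above operates on the set of adjacent symbol pairs produced by the get_pairs helper, ranking each pair against the learned merge table. The pair extraction in miniature:

word = ("l", "o", "w", "er</w>")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
print(sorted(pairs))  # [('l', 'o'), ('o', 'w'), ('w', 'er</w>')]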
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
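The class above is a pure deprecation shim: warn, then delegate to the replacement. The same pattern stripped to its essentials, with illustrative class names:

import warnings

class NewImageProcessor:
    def __init__(self, *args, **kwargs):
        pass

class OldFeatureExtractor(NewImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldFeatureExtractor is deprecated and will be removed; "
            "use NewImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldFeatureExtractor()  # emits the FutureWarning, then behaves like the new class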
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available UpperCAmelCase : str = { 'configuration_bloom': ['BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'BloomConfig', 'BloomOnnxConfig'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = ['BloomTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST', 'BloomForCausalLM', 'BloomModel', 'BloomPreTrainedModel', 'BloomForSequenceClassification', 'BloomForTokenClassification', 'BloomForQuestionAnswering', ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
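The _LazyModule indirection above defers heavy imports until an attribute is first accessed. A rough stand-in using PEP 562's module-level __getattr__ (the real transformers implementation additionally handles submodules and TYPE_CHECKING); _import_structure here maps to the stdlib purely for demonstration:

import importlib

_import_structure = {"json": ["dumps", "loads"]}

def __getattr__(name):
    # PEP 562: called for attributes missing from this module.
    for module_name, exports in _import_structure.items():
        if name in exports:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")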
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
320
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : Tuple = logging.get_logger(__name__) UpperCAmelCase : List[Any] = { 'facebook/vit-mae-base': 'https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class lowerCamelCase__ ( A ): """simple docstring""" __a = """vit_mae""" def __init__( self : str , UpperCamelCase : Optional[Any]=768 , UpperCamelCase : str=12 , UpperCamelCase : Dict=12 , UpperCamelCase : Any=3_072 , UpperCamelCase : Any="gelu" , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : List[str]=0.0 , UpperCamelCase : str=0.02 , UpperCamelCase : Tuple=1e-1_2 , UpperCamelCase : List[str]=224 , UpperCamelCase : Optional[int]=16 , UpperCamelCase : Optional[Any]=3 , UpperCamelCase : Optional[int]=True , UpperCamelCase : str=16 , UpperCamelCase : List[Any]=512 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : Optional[Any]=2_048 , UpperCamelCase : Dict=0.75 , UpperCamelCase : List[str]=False , **UpperCamelCase : List[str] , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : int = num_hidden_layers __UpperCAmelCase : List[str] = num_attention_heads __UpperCAmelCase : Any = intermediate_size __UpperCAmelCase : List[str] = hidden_act __UpperCAmelCase : Dict = hidden_dropout_prob __UpperCAmelCase : List[Any] = attention_probs_dropout_prob __UpperCAmelCase : Optional[int] = initializer_range __UpperCAmelCase : Optional[int] = layer_norm_eps __UpperCAmelCase : int = image_size __UpperCAmelCase : Optional[int] = patch_size __UpperCAmelCase : List[str] = num_channels __UpperCAmelCase : Union[str, Any] = qkv_bias __UpperCAmelCase : int = decoder_num_attention_heads __UpperCAmelCase : List[Any] = decoder_hidden_size __UpperCAmelCase : Any = decoder_num_hidden_layers __UpperCAmelCase : Optional[Any] = decoder_intermediate_size __UpperCAmelCase : Optional[Any] = mask_ratio __UpperCAmelCase : Optional[Any] = norm_pix_loss
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int: '''simple docstring''' return number | (1 << position) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int: '''simple docstring''' return number & ~(1 << position) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int: '''simple docstring''' return number ^ (1 << position) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> bool: '''simple docstring''' return ((number >> position) & 1) == 1 def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : int ) -> int: '''simple docstring''' return int((number & (1 << position)) != 0 ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] ) -> Any: '''simple docstring''' __UpperCAmelCase : Optional[Any] = 0 while b > 0: if b & 1: res += a a += a b >>= 1 return res def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> str: '''simple docstring''' __UpperCAmelCase : Dict = 0 while b > 0: if b & 1: __UpperCAmelCase : int = ((res % c) + (a % c)) % c a += a b >>= 1 return res
320
1
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : list ) -> list: '''simple docstring''' def merge(_UpperCamelCase : list , _UpperCamelCase : list ) -> list: def _merge(): while left and right: yield (left if left[0] <= right[0] else right).pop(0 ) yield from left yield from right return list(_merge() ) if len(_UpperCamelCase ) <= 1: return collection __UpperCAmelCase : int = len(_UpperCamelCase ) // 2 return merge(merge_sort(collection[:mid] ) , merge_sort(collection[mid:] ) ) if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : str = input('Enter numbers separated by a comma:\n').strip() UpperCAmelCase : str = [int(item) for item in user_input.split(',')] print(*merge_sort(unsorted), sep=',')
320
"""simple docstring""" from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""image_processor""", """tokenizer"""] __a = """AutoImageProcessor""" __a = """AutoTokenizer""" def __init__( self : Union[str, Any] , UpperCamelCase : List[Any] , UpperCamelCase : List[str] ): '''simple docstring''' super().__init__(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : str = self.image_processor def __call__( self : Dict , UpperCamelCase : Optional[int]=None , UpperCamelCase : Optional[int]=None , UpperCamelCase : int=None , **UpperCamelCase : Optional[int] ): '''simple docstring''' if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""" ) if text is not None: __UpperCAmelCase : List[str] = self.tokenizer(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if images is not None: __UpperCAmelCase : Optional[Any] = self.image_processor(UpperCamelCase , return_tensors=UpperCamelCase , **UpperCamelCase ) if text is not None and images is not None: __UpperCAmelCase : str = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**UpperCamelCase ) , tensor_type=UpperCamelCase ) def lowerCamelCase__ ( self : List[str] , *UpperCamelCase : Optional[int] , **UpperCamelCase : Dict ): '''simple docstring''' return self.tokenizer.batch_decode(*UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , *UpperCamelCase : str , **UpperCamelCase : Optional[Any] ): '''simple docstring''' return self.tokenizer.decode(*UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return ["input_ids", "attention_mask", "pixel_values"]
320
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Union[str, Any] = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ReformerTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[int] = ['ReformerTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST', 'ReformerAttention', 'ReformerForMaskedLM', 'ReformerForQuestionAnswering', 'ReformerForSequenceClassification', 'ReformerLayer', 'ReformerModel', 'ReformerModelWithLMHead', 'ReformerPreTrainedModel', ] if TYPE_CHECKING: from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer import ReformerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_reformer_fast import ReformerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ReformerPreTrainedModel, ) else: import sys UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" from __future__ import annotations def lowerCamelCase ( _UpperCamelCase : list[float] , _UpperCamelCase : list[float] ) -> float: '''simple docstring''' __UpperCAmelCase : Tuple = sorted(numsa + numsa ) __UpperCAmelCase ,__UpperCAmelCase : Dict = divmod(len(_UpperCamelCase ) , 2 ) if mod == 1: return all_numbers[div] else: return (all_numbers[div] + all_numbers[div - 1]) / 2 if __name__ == "__main__": import doctest doctest.testmod() UpperCAmelCase : List[Any] = [float(x) for x in input('Enter the elements of first array: ').split()] UpperCAmelCase : Optional[int] = [float(x) for x in input('Enter the elements of second array: ').split()] print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_a)}")
320
1
"""simple docstring""" import multiprocessing import time from arguments import PretokenizationArguments from datasets import load_dataset from transformers import AutoTokenizer, HfArgumentParser def lowerCamelCase ( _UpperCamelCase : Dict ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Any = {} __UpperCAmelCase : List[Any] = tokenizer(example["""content"""] , truncation=_UpperCamelCase )["""input_ids"""] __UpperCAmelCase : Optional[Any] = len(example["""content"""] ) / len(output["""input_ids"""] ) return output UpperCAmelCase : int = HfArgumentParser(PretokenizationArguments) UpperCAmelCase : Optional[Any] = parser.parse_args() if args.num_workers is None: UpperCAmelCase : Optional[Any] = multiprocessing.cpu_count() UpperCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(args.tokenizer_dir) UpperCAmelCase : List[str] = time.time() UpperCAmelCase : Optional[Any] = load_dataset(args.dataset_name, split='train') print(F"Dataset loaded in {time.time()-t_start:.2f}s") UpperCAmelCase : List[Any] = time.time() UpperCAmelCase : List[Any] = ds.map( tokenize, num_proc=args.num_workers, remove_columns=[ 'repo_name', 'path', 'copies', 'size', 'content', 'license', 'hash', 'line_mean', 'line_max', 'alpha_frac', 'autogenerated', ], ) print(F"Dataset tokenized in {time.time()-t_start:.2f}s") UpperCAmelCase : Union[str, Any] = time.time() ds.push_to_hub(args.tokenized_data_repo) print(F"Data pushed to the hub in {time.time()-t_start:.2f}s")
320
"""simple docstring""" import tempfile import unittest from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from transformers.testing_utils import ( is_torch_available, require_optimum, require_torch, slow, ) if is_torch_available(): import torch @require_torch @require_optimum @slow class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : Dict = AutoTokenizer.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Optional[int] = tokenizer("""This is me""" , return_tensors="""pt""" ) __UpperCAmelCase : int = model.to_bettertransformer() self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) __UpperCAmelCase : Tuple = model.generate(**UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) ) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Any = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) self.assertFalse( any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) ) __UpperCAmelCase : Tuple = model_reloaded.generate(**UpperCamelCase ) self.assertTrue(torch.allclose(UpperCamelCase , UpperCamelCase ) ) def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Any = """hf-internal-testing/tiny-random-t5""" __UpperCAmelCase : List[Any] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.to_bettertransformer() with tempfile.TemporaryDirectory() as tmpdirname: with self.assertRaises(UpperCamelCase ): model.save_pretrained(UpperCamelCase ) __UpperCAmelCase : Tuple = model.reverse_bettertransformer() model.save_pretrained(UpperCamelCase )
320
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available UpperCAmelCase : Dict = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = ['BartphoTokenizer'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bartpho import BartphoTokenizer else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" import subprocess import sys from transformers import BertConfig, BertModel, BertTokenizer, pipeline from transformers.testing_utils import TestCasePlus, require_torch class lowerCamelCase__ ( A ): """simple docstring""" @require_torch def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Dict = """ from transformers import BertConfig, BertModel, BertTokenizer, pipeline """ __UpperCAmelCase : List[str] = """ mname = \"hf-internal-testing/tiny-random-bert\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task=\"fill-mask\", model=mname) print(\"success\") """ __UpperCAmelCase : int = """ import socket def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\") socket.socket = offline_socket """ # Force fetching the files so that we can use the cache __UpperCAmelCase : int = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(UpperCamelCase ) BertModel.from_pretrained(UpperCamelCase ) BertTokenizer.from_pretrained(UpperCamelCase ) pipeline(task="""fill-mask""" , model=UpperCamelCase ) # baseline - just load from_pretrained with normal network __UpperCAmelCase : Any = [sys.executable, """-c""", """\n""".join([load, run, mock] )] # should succeed __UpperCAmelCase : str = self.get_env() # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __UpperCAmelCase : Union[str, Any] = """1""" __UpperCAmelCase : List[str] = subprocess.run(UpperCamelCase , env=UpperCamelCase , check=UpperCamelCase , capture_output=UpperCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Dict = """ from transformers import BertConfig, BertModel, BertTokenizer, pipeline """ __UpperCAmelCase : Any = """ mname = \"hf-internal-testing/tiny-random-bert\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) BertTokenizer.from_pretrained(mname) pipe = pipeline(task=\"fill-mask\", model=mname) print(\"success\") """ __UpperCAmelCase : Dict = """ import socket def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\") socket.socket = offline_socket """ # Force fetching the files so that we can use the cache __UpperCAmelCase : List[Any] = """hf-internal-testing/tiny-random-bert""" BertConfig.from_pretrained(UpperCamelCase ) BertModel.from_pretrained(UpperCamelCase ) BertTokenizer.from_pretrained(UpperCamelCase ) pipeline(task="""fill-mask""" , model=UpperCamelCase ) # baseline - just load from_pretrained with normal network __UpperCAmelCase : str = [sys.executable, """-c""", """\n""".join([load, run, mock] )] # should succeed __UpperCAmelCase : Dict = self.get_env() __UpperCAmelCase : List[Any] = subprocess.run(UpperCamelCase , env=UpperCamelCase , check=UpperCamelCase , capture_output=UpperCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : int = """ from transformers import BertConfig, BertModel, BertTokenizer """ __UpperCAmelCase : List[Any] = """ mname = \"hf-internal-testing/tiny-random-bert-sharded\" BertConfig.from_pretrained(mname) BertModel.from_pretrained(mname) print(\"success\") """ __UpperCAmelCase : str = """ import socket def 
offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\") socket.socket = offline_socket """ # baseline - just load from_pretrained with normal network __UpperCAmelCase : Optional[int] = [sys.executable, """-c""", """\n""".join([load, run] )] # should succeed __UpperCAmelCase : Optional[int] = self.get_env() __UpperCAmelCase : List[str] = subprocess.run(UpperCamelCase , env=UpperCamelCase , check=UpperCamelCase , capture_output=UpperCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # next emulate no network __UpperCAmelCase : str = [sys.executable, """-c""", """\n""".join([load, mock, run] )] # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this. # env["TRANSFORMERS_OFFLINE"] = "0" # result = subprocess.run(cmd, env=env, check=False, capture_output=True) # self.assertEqual(result.returncode, 1, result.stderr) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __UpperCAmelCase : Tuple = """1""" __UpperCAmelCase : List[Any] = subprocess.run(UpperCamelCase , env=UpperCamelCase , check=UpperCamelCase , capture_output=UpperCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) @require_torch def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = """ from transformers import pipeline """ __UpperCAmelCase : Tuple = """ mname = \"hf-internal-testing/tiny-random-bert\" pipe = pipeline(model=mname) """ __UpperCAmelCase : Dict = """ import socket def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\") socket.socket = offline_socket """ __UpperCAmelCase : int = self.get_env() __UpperCAmelCase : Any = """1""" __UpperCAmelCase : List[str] = [sys.executable, """-c""", """\n""".join([load, mock, run] )] __UpperCAmelCase : List[str] = subprocess.run(UpperCamelCase , env=UpperCamelCase , check=UpperCamelCase , capture_output=UpperCamelCase ) self.assertEqual(result.returncode , 1 , result.stderr ) self.assertIn( """You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = """ from transformers import AutoModel """ __UpperCAmelCase : Optional[Any] = """ mname = \"hf-internal-testing/test_dynamic_model\" AutoModel.from_pretrained(mname, trust_remote_code=True) print(\"success\") """ # baseline - just load from_pretrained with normal network __UpperCAmelCase : List[str] = [sys.executable, """-c""", """\n""".join([load, run] )] # should succeed __UpperCAmelCase : str = self.get_env() __UpperCAmelCase : int = subprocess.run(UpperCamelCase , env=UpperCamelCase , check=UpperCamelCase , capture_output=UpperCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() ) # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files __UpperCAmelCase : Optional[int] = """1""" __UpperCAmelCase : Union[str, Any] = subprocess.run(UpperCamelCase , env=UpperCamelCase , check=UpperCamelCase , capture_output=UpperCamelCase ) self.assertEqual(result.returncode , 0 , result.stderr ) self.assertIn("""success""" , result.stdout.decode() )
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[str] = { 'configuration_transfo_xl': ['TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'TransfoXLConfig'], 'tokenization_transfo_xl': ['TransfoXLCorpus', 'TransfoXLTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Tuple = [ 'TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'AdaptiveEmbedding', 'TransfoXLForSequenceClassification', 'TransfoXLLMHeadModel', 'TransfoXLModel', 'TransfoXLPreTrainedModel', 'load_tf_weights_in_transfo_xl', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Dict = [ 'TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFAdaptiveEmbedding', 'TFTransfoXLForSequenceClassification', 'TFTransfoXLLMHeadModel', 'TFTransfoXLMainLayer', 'TFTransfoXLModel', 'TFTransfoXLPreTrainedModel', ] if TYPE_CHECKING: from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, load_tf_weights_in_transfo_xl, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) else: import sys UpperCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from typing import Tuple, Union from ...modeling_outputs import BackboneOutput from ...modeling_utils import PreTrainedModel from ...utils import is_timm_available, is_torch_available, requires_backends from ...utils.backbone_utils import BackboneMixin from .configuration_timm_backbone import TimmBackboneConfig if is_timm_available(): import timm if is_torch_available(): from torch import Tensor class lowerCamelCase__ ( A , A ): """simple docstring""" __a = """pixel_values""" __a = False __a = TimmBackboneConfig def __init__( self : Dict , UpperCamelCase : Union[str, Any] , **UpperCamelCase : Optional[int] ): '''simple docstring''' requires_backends(self , """timm""" ) super().__init__(UpperCamelCase ) __UpperCAmelCase : List[Any] = config if config.backbone is None: raise ValueError("""backbone is not set in the config. Please set it to a timm model name.""" ) if config.backbone not in timm.list_models(): raise ValueError(f'''backbone {config.backbone} is not supported by timm.''' ) if hasattr(UpperCamelCase , """out_features""" ) and config.out_features is not None: raise ValueError("""out_features is not supported by TimmBackbone. Please use out_indices instead.""" ) __UpperCAmelCase : List[Any] = getattr(UpperCamelCase , """use_pretrained_backbone""" , UpperCamelCase ) if pretrained is None: raise ValueError("""use_pretrained_backbone is not set in the config. Please set it to True or False.""" ) # We just take the final layer by default. This matches the default for the transformers models. __UpperCAmelCase : int = config.out_indices if getattr(UpperCamelCase , """out_indices""" , UpperCamelCase ) is not None else (-1,) __UpperCAmelCase : Optional[int] = timm.create_model( config.backbone , pretrained=UpperCamelCase , features_only=config.features_only , in_chans=config.num_channels , out_indices=UpperCamelCase , **UpperCamelCase , ) # These are used to control the output of the model when called. If output_hidden_states is True, then # return_layers is modified to include all layers. 
__UpperCAmelCase : Optional[int] = self._backbone.return_layers __UpperCAmelCase : List[Any] = {layer["""module"""]: str(UpperCamelCase ) for i, layer in enumerate(self._backbone.feature_info.info )} super()._init_backbone(UpperCamelCase ) @classmethod def lowerCamelCase__ ( cls : Union[str, Any] , UpperCamelCase : Optional[Any] , *UpperCamelCase : Optional[Any] , **UpperCamelCase : int ): '''simple docstring''' requires_backends(cls , ["""vision""", """timm"""] ) from ...models.timm_backbone import TimmBackboneConfig __UpperCAmelCase : int = kwargs.pop("""config""" , TimmBackboneConfig() ) __UpperCAmelCase : Dict = kwargs.pop("""use_timm_backbone""" , UpperCamelCase ) if not use_timm: raise ValueError("""use_timm_backbone must be True for timm backbones""" ) __UpperCAmelCase : Dict = kwargs.pop("""num_channels""" , config.num_channels ) __UpperCAmelCase : int = kwargs.pop("""features_only""" , config.features_only ) __UpperCAmelCase : str = kwargs.pop("""use_pretrained_backbone""" , config.use_pretrained_backbone ) __UpperCAmelCase : Tuple = kwargs.pop("""out_indices""" , config.out_indices ) __UpperCAmelCase : int = TimmBackboneConfig( backbone=UpperCamelCase , num_channels=UpperCamelCase , features_only=UpperCamelCase , use_pretrained_backbone=UpperCamelCase , out_indices=UpperCamelCase , ) return super()._from_config(UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Optional[int]=None , UpperCamelCase : Any=None , UpperCamelCase : List[str]=None , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : int = return_dict if return_dict is not None else self.config.use_return_dict __UpperCAmelCase : Tuple = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) __UpperCAmelCase : Tuple = output_attentions if output_attentions is not None else self.config.output_attentions if output_attentions: raise ValueError("""Cannot output attentions for timm backbones at the moment""" ) if output_hidden_states: # We modify the return layers to include all the stages of the backbone __UpperCAmelCase : Any = self._all_layers __UpperCAmelCase : Dict = self._backbone(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : List[Any] = self._return_layers __UpperCAmelCase : int = tuple(hidden_states[i] for i in self.out_indices ) else: __UpperCAmelCase : Optional[int] = self._backbone(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : List[str] = tuple(UpperCamelCase ) __UpperCAmelCase : Dict = tuple(UpperCamelCase ) if hidden_states is not None else None if not return_dict: __UpperCAmelCase : Optional[Any] = (feature_maps,) if output_hidden_states: __UpperCAmelCase : Tuple = output + (hidden_states,) return output return BackboneOutput(feature_maps=UpperCamelCase , hidden_states=UpperCamelCase , attentions=UpperCamelCase )
320
"""simple docstring""" def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : List[str] = [] __UpperCAmelCase : List[str] = 1 while len(_UpperCamelCase ) < 1E6: constant.append(str(_UpperCamelCase ) ) i += 1 __UpperCAmelCase : List[str] = """""".join(_UpperCamelCase ) return ( int(constant[0] ) * int(constant[9] ) * int(constant[9_9] ) * int(constant[9_9_9] ) * int(constant[9_9_9_9] ) * int(constant[9_9_9_9_9] ) * int(constant[9_9_9_9_9_9] ) ) if __name__ == "__main__": print(solution())
320
1
"""simple docstring""" from __future__ import annotations class lowerCamelCase__ : """simple docstring""" def __init__( self : Any , UpperCamelCase : int = 0 ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = key def lowerCamelCase__ ( self : str , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[int] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(UpperCamelCase ) ^ key ) for ch in content] def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Optional[int] = key or self.__key or 1 # make sure key is an appropriate size key %= 255 return [chr(ord(UpperCamelCase ) ^ key ) for ch in content] def lowerCamelCase__ ( self : int , UpperCamelCase : str , UpperCamelCase : int = 0 ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : int = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned __UpperCAmelCase : Union[str, Any] = """""" for ch in content: ans += chr(ord(UpperCamelCase ) ^ key ) return ans def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : int = 0 ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = key or self.__key or 1 # make sure key can be any size while key > 255: key -= 255 # This will be returned __UpperCAmelCase : str = """""" for ch in content: ans += chr(ord(UpperCamelCase ) ^ key ) return ans def lowerCamelCase__ ( self : Any , UpperCamelCase : str , UpperCamelCase : int = 0 ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) try: with open(UpperCamelCase ) as fin, open("""encrypt.out""" , """w+""" ) as fout: # actual encrypt-process for line in fin: fout.write(self.encrypt_string(UpperCamelCase , UpperCamelCase ) ) except OSError: return False return True def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str , UpperCamelCase : int ): '''simple docstring''' assert isinstance(UpperCamelCase , UpperCamelCase ) and isinstance(UpperCamelCase , UpperCamelCase ) try: with open(UpperCamelCase ) as fin, open("""decrypt.out""" , """w+""" ) as fout: # actual encrypt-process for line in fin: fout.write(self.decrypt_string(UpperCamelCase , UpperCamelCase ) ) except OSError: return False return True # Tests # crypt = XORCipher() # key = 67 # # test encrypt # print(crypt.encrypt("hallo welt",key)) # # test decrypt # print(crypt.decrypt(crypt.encrypt("hallo welt",key), key)) # # test encrypt_string # print(crypt.encrypt_string("hallo welt",key)) # # test decrypt_string # print(crypt.decrypt_string(crypt.encrypt_string("hallo welt",key),key)) # if (crypt.encrypt_file("test.txt",key)): # print("encrypt successful") # else: # print("encrypt unsuccessful") # if (crypt.decrypt_file("encrypt.out",key)): # print("decrypt successful") # else: # print("decrypt unsuccessful")
320
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) UpperCAmelCase : Tuple = { 'configuration_electra': ['ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ElectraConfig', 'ElectraOnnxConfig'], 'tokenization_electra': ['ElectraTokenizer'], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = ['ElectraTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Any = [ 'ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'ElectraForCausalLM', 'ElectraForMaskedLM', 'ElectraForMultipleChoice', 'ElectraForPreTraining', 'ElectraForQuestionAnswering', 'ElectraForSequenceClassification', 'ElectraForTokenClassification', 'ElectraModel', 'ElectraPreTrainedModel', 'load_tf_weights_in_electra', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFElectraForMaskedLM', 'TFElectraForMultipleChoice', 'TFElectraForPreTraining', 'TFElectraForQuestionAnswering', 'TFElectraForSequenceClassification', 'TFElectraForTokenClassification', 'TFElectraModel', 'TFElectraPreTrainedModel', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : str = [ 'FlaxElectraForCausalLM', 'FlaxElectraForMaskedLM', 'FlaxElectraForMultipleChoice', 'FlaxElectraForPreTraining', 'FlaxElectraForQuestionAnswering', 'FlaxElectraForSequenceClassification', 'FlaxElectraForTokenClassification', 'FlaxElectraModel', 'FlaxElectraPreTrainedModel', ] if TYPE_CHECKING: from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig from .tokenization_electra import ElectraTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_electra_fast import ElectraTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForCausalLM, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_electra import ( FlaxElectraForCausalLM, FlaxElectraForMaskedLM, FlaxElectraForMultipleChoice, FlaxElectraForPreTraining, FlaxElectraForQuestionAnswering, FlaxElectraForSequenceClassification, FlaxElectraForTokenClassification, FlaxElectraModel, FlaxElectraPreTrainedModel, ) else: import sys 
UpperCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCAmelCase : List[Any] = logging.get_logger(__name__) UpperCAmelCase : Optional[int] = { 'funnel-transformer/small': 'https://huggingface.co/funnel-transformer/small/resolve/main/config.json', 'funnel-transformer/small-base': 'https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json', 'funnel-transformer/medium': 'https://huggingface.co/funnel-transformer/medium/resolve/main/config.json', 'funnel-transformer/medium-base': 'https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json', 'funnel-transformer/intermediate': ( 'https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json' ), 'funnel-transformer/intermediate-base': ( 'https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json' ), 'funnel-transformer/large': 'https://huggingface.co/funnel-transformer/large/resolve/main/config.json', 'funnel-transformer/large-base': 'https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json', 'funnel-transformer/xlarge': 'https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json', 'funnel-transformer/xlarge-base': 'https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json', } class lowerCamelCase__ ( A ): """simple docstring""" __a = """funnel""" __a = { """hidden_size""": """d_model""", """num_attention_heads""": """n_head""", } def __init__( self : Tuple , UpperCamelCase : Any=30_522 , UpperCamelCase : List[Any]=[4, 4, 4] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple=2 , UpperCamelCase : Optional[int]=768 , UpperCamelCase : str=12 , UpperCamelCase : Dict=64 , UpperCamelCase : Optional[Any]=3_072 , UpperCamelCase : str="gelu_new" , UpperCamelCase : Dict=0.1 , UpperCamelCase : Dict=0.1 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : Optional[int]=0.1 , UpperCamelCase : List[Any]=None , UpperCamelCase : Any=1e-9 , UpperCamelCase : List[str]="mean" , UpperCamelCase : Any="relative_shift" , UpperCamelCase : Dict=True , UpperCamelCase : Dict=True , UpperCamelCase : str=True , **UpperCamelCase : str , ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = vocab_size __UpperCAmelCase : Any = block_sizes __UpperCAmelCase : Optional[Any] = [1] * len(UpperCamelCase ) if block_repeats is None else block_repeats assert len(UpperCamelCase ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
__UpperCAmelCase : List[str] = num_decoder_layers __UpperCAmelCase : Dict = d_model __UpperCAmelCase : List[str] = n_head __UpperCAmelCase : Tuple = d_head __UpperCAmelCase : List[str] = d_inner __UpperCAmelCase : Optional[int] = hidden_act __UpperCAmelCase : Dict = hidden_dropout __UpperCAmelCase : Tuple = attention_dropout __UpperCAmelCase : int = activation_dropout __UpperCAmelCase : Dict = initializer_range __UpperCAmelCase : Optional[int] = initializer_std __UpperCAmelCase : int = layer_norm_eps assert pooling_type in [ "mean", "max", ], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' __UpperCAmelCase : int = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' __UpperCAmelCase : Optional[Any] = attention_type __UpperCAmelCase : str = separate_cls __UpperCAmelCase : Any = truncate_seq __UpperCAmelCase : str = pool_q_only super().__init__(**UpperCamelCase ) @property def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' return sum(self.block_sizes ) @num_hidden_layers.setter def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' raise NotImplementedError( """This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.""" ) @property def lowerCamelCase__ ( self : str ): '''simple docstring''' return len(self.block_sizes ) @num_blocks.setter def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Any ): '''simple docstring''' raise NotImplementedError("""This model does not support the setting of `num_blocks`. Please set `block_sizes`.""" )
320
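One detail worth calling out in the config above: num_hidden_layers and num_blocks are read-only properties derived from block_sizes, and their setters deliberately raise. A stand-alone sketch of that design follows; the class name is illustrative, not the real FunnelConfig.

class BlockedConfig:
    def __init__(self, block_sizes=(4, 4, 4), block_repeats=None):
        self.block_sizes = list(block_sizes)
        self.block_repeats = [1] * len(self.block_sizes) if block_repeats is None else list(block_repeats)
        assert len(self.block_sizes) == len(self.block_repeats), "`block_sizes` and `block_repeats` should have the same length."

    @property
    def num_hidden_layers(self):
        return sum(self.block_sizes)  # total encoder depth is implied by the block layout

    @num_hidden_layers.setter
    def num_hidden_layers(self, value):
        raise NotImplementedError("Set `block_sizes` instead.")

    @property
    def num_blocks(self):
        return len(self.block_sizes)

cfg = BlockedConfig(block_sizes=[4, 4, 4])
print(cfg.num_blocks, cfg.num_hidden_layers)  # 3 12

Keeping the derived values as properties guarantees they can never drift out of sync with block_sizes.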
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
1
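The _get_compatibles classmethod above resolves class-name strings against the package's top-level namespace. Reduced to its essentials it looks like the sketch below; the real diffusers helper differs in details.

import importlib

def get_compatibles(cls):
    # De-duplicate while keeping the class itself first.
    names = list(dict.fromkeys([cls.__name__] + list(getattr(cls, "_compatibles", []))))
    # Look candidates up on the top-level package the class was defined in.
    package = importlib.import_module(cls.__module__.split(".")[0])
    return [getattr(package, n) for n in names if hasattr(package, n)]

The original's list(set(...)) makes the result order nondeterministic; dict.fromkeys here keeps it stable without changing membership.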
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCAmelCase : List[Any] = { 'configuration_xlm': ['XLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XLMConfig', 'XLMOnnxConfig'], 'tokenization_xlm': ['XLMTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : Optional[Any] = [ 'XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'XLMForMultipleChoice', 'XLMForQuestionAnswering', 'XLMForQuestionAnsweringSimple', 'XLMForSequenceClassification', 'XLMForTokenClassification', 'XLMModel', 'XLMPreTrainedModel', 'XLMWithLMHeadModel', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCAmelCase : List[Any] = [ 'TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST', 'TFXLMForMultipleChoice', 'TFXLMForQuestionAnsweringSimple', 'TFXLMForSequenceClassification', 'TFXLMForTokenClassification', 'TFXLMMainLayer', 'TFXLMModel', 'TFXLMPreTrainedModel', 'TFXLMWithLMHeadModel', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
320
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
1
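For reference, this is how the pipeline under test is invoked outside the test harness. Running it for real requires transformers with a torch backend, network access, and enough memory for the SAM checkpoint, so treat it as illustrative:

import numpy as np
from transformers import pipeline

generator = pipeline("mask-generation", model="facebook/sam-vit-huge")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=256,  # larger batches of point prompts trade memory for speed
)
# outputs["masks"] and outputs["scores"] are parallel lists, best masks first,
# which is exactly how the test above zips them together before hashing.
for mask, score in zip(outputs["masks"][:3], outputs["scores"][:3]):
    print(np.array(mask).shape, float(score))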
"""simple docstring""" import importlib import os from dataclasses import dataclass from enum import Enum from typing import Any, Dict, Optional, Union import torch from ..utils import BaseOutput UpperCAmelCase : Optional[Any] = 'scheduler_config.json' class lowerCamelCase__ ( A ): """simple docstring""" __a = 1 __a = 2 __a = 3 __a = 4 __a = 5 __a = 6 __a = 7 __a = 8 __a = 9 __a = 10 __a = 11 __a = 12 __a = 13 __a = 14 @dataclass class lowerCamelCase__ ( A ): """simple docstring""" __a = 42 class lowerCamelCase__ : """simple docstring""" __a = SCHEDULER_CONFIG_NAME __a = [] __a = True @classmethod def lowerCamelCase__ ( cls : Any , UpperCamelCase : Dict[str, Any] = None , UpperCamelCase : Optional[str] = None , UpperCamelCase : Optional[Any]=False , **UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : List[Any] = cls.load_config( pretrained_model_name_or_path=UpperCamelCase , subfolder=UpperCamelCase , return_unused_kwargs=UpperCamelCase , return_commit_hash=UpperCamelCase , **UpperCamelCase , ) return cls.from_config(UpperCamelCase , return_unused_kwargs=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, os.PathLike] , UpperCamelCase : bool = False , **UpperCamelCase : Optional[Any] ): '''simple docstring''' self.save_config(save_directory=UpperCamelCase , push_to_hub=UpperCamelCase , **UpperCamelCase ) @property def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return self._get_compatibles() @classmethod def lowerCamelCase__ ( cls : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Optional[int] = list(set([cls.__name__] + cls._compatibles ) ) __UpperCAmelCase : List[str] = importlib.import_module(__name__.split(""".""" )[0] ) __UpperCAmelCase : List[str] = [ getattr(UpperCamelCase , UpperCamelCase ) for c in compatible_classes_str if hasattr(UpperCamelCase , UpperCamelCase ) ] return compatible_classes
320
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
320
1
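The POOLING_BREAKDOWN table above is what lets a single backbone emit a configurable number of image tokens: adaptive average pooling to an (h, w) grid with h * w == num_image_embeds, followed by flatten and transpose, yields exactly that many 2048-dimensional embeddings. A stand-alone shape check, with random features in place of the ResNet backbone:

import torch
from torch import nn

POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}

num_image_embeds = 4
pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[num_image_embeds])

features = torch.randn(8, 2048, 7, 7)   # B x C x H x W, as the last ResNet stage emits
out = pool(features)                    # -> 8 x 2048 x 2 x 2
out = torch.flatten(out, start_dim=2)   # -> 8 x 2048 x 4
out = out.transpose(1, 2).contiguous()  # -> 8 x 4 x 2048
print(out.shape)                        # torch.Size([8, 4, 2048]), i.e. B x N x 2048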
"""simple docstring""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging UpperCAmelCase : Dict = logging.get_logger(__name__) UpperCAmelCase : Tuple = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'} # See all LED models at https://huggingface.co/models?filter=LED UpperCAmelCase : str = { 'vocab_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json', }, 'merges_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt', }, 'tokenizer_file': { 'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json', }, } UpperCAmelCase : Dict = { 'allenai/led-base-16384': 1_6384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' __UpperCAmelCase : Optional[int] = ( list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) ) ) __UpperCAmelCase : Any = bs[:] __UpperCAmelCase : Optional[int] = 0 for b in range(2**8 ): if b not in bs: bs.append(_UpperCamelCase ) cs.append(2**8 + n ) n += 1 __UpperCAmelCase : List[Any] = [chr(_UpperCamelCase ) for n in cs] return dict(zip(_UpperCamelCase , _UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Optional[Any] ) -> List[Any]: '''simple docstring''' __UpperCAmelCase : Any = set() __UpperCAmelCase : Dict = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __UpperCAmelCase : Any = char return pairs class lowerCamelCase__ ( A ): """simple docstring""" __a = VOCAB_FILES_NAMES __a = PRETRAINED_VOCAB_FILES_MAP __a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __a = ["""input_ids""", """attention_mask"""] def __init__( self : int , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : Optional[Any]="replace" , UpperCamelCase : Tuple="<s>" , UpperCamelCase : Union[str, Any]="</s>" , UpperCamelCase : Optional[int]="</s>" , UpperCamelCase : str="<s>" , UpperCamelCase : Union[str, Any]="<unk>" , UpperCamelCase : Dict="<pad>" , UpperCamelCase : Tuple="<mask>" , UpperCamelCase : List[Any]=False , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else bos_token __UpperCAmelCase : Tuple = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else eos_token __UpperCAmelCase : Optional[int] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else sep_token __UpperCAmelCase : str = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else cls_token __UpperCAmelCase : Union[str, Any] = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else unk_token __UpperCAmelCase : Any = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else pad_token # Mask token 
behave like a normal word, i.e. include the space before it __UpperCAmelCase : str = AddedToken(UpperCamelCase , lstrip=UpperCamelCase , rstrip=UpperCamelCase ) if isinstance(UpperCamelCase , UpperCamelCase ) else mask_token super().__init__( errors=UpperCamelCase , bos_token=UpperCamelCase , eos_token=UpperCamelCase , unk_token=UpperCamelCase , sep_token=UpperCamelCase , cls_token=UpperCamelCase , pad_token=UpperCamelCase , mask_token=UpperCamelCase , add_prefix_space=UpperCamelCase , **UpperCamelCase , ) with open(UpperCamelCase , encoding="""utf-8""" ) as vocab_handle: __UpperCAmelCase : Dict = json.load(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = {v: k for k, v in self.encoder.items()} __UpperCAmelCase : Optional[Any] = errors # how to handle errors in decoding __UpperCAmelCase : Optional[int] = bytes_to_unicode() __UpperCAmelCase : Tuple = {v: k for k, v in self.byte_encoder.items()} with open(UpperCamelCase , encoding="""utf-8""" ) as merges_handle: __UpperCAmelCase : Optional[int] = merges_handle.read().split("""\n""" )[1:-1] __UpperCAmelCase : str = [tuple(merge.split() ) for merge in bpe_merges] __UpperCAmelCase : Tuple = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Optional[Any] = {} __UpperCAmelCase : Optional[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions __UpperCAmelCase : str = re.compile(R"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" ) @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' return len(self.encoder ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : Dict ): '''simple docstring''' if token in self.cache: return self.cache[token] __UpperCAmelCase : Any = tuple(UpperCamelCase ) __UpperCAmelCase : List[Any] = get_pairs(UpperCamelCase ) if not pairs: return token while True: __UpperCAmelCase : Union[str, Any] = min(UpperCamelCase , key=lambda UpperCamelCase : self.bpe_ranks.get(UpperCamelCase , float("""inf""" ) ) ) if bigram not in self.bpe_ranks: break __UpperCAmelCase ,__UpperCAmelCase : str = bigram __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : int = 0 while i < len(UpperCamelCase ): try: __UpperCAmelCase : int = word.index(UpperCamelCase , UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __UpperCAmelCase : Optional[int] = j if word[i] == first and i < len(UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 __UpperCAmelCase : List[str] = tuple(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = new_word if len(UpperCamelCase ) == 1: break else: __UpperCAmelCase : int = get_pairs(UpperCamelCase ) __UpperCAmelCase : Any = """ """.join(UpperCamelCase ) __UpperCAmelCase : Dict = word return word def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : int = [] for token in re.findall(self.pat , UpperCamelCase ): __UpperCAmelCase : int = """""".join( self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(UpperCamelCase ).split(""" """ ) ) 
return bpe_tokens def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] ): '''simple docstring''' return self.encoder.get(UpperCamelCase , self.encoder.get(self.unk_token ) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Optional[int] ): '''simple docstring''' return self.decoder.get(UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = """""".join(UpperCamelCase ) __UpperCAmelCase : Optional[int] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors ) return text def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : str , UpperCamelCase : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return __UpperCAmelCase : List[Any] = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join( UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] ) with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase , ensure_ascii=UpperCamelCase ) + """\n""" ) __UpperCAmelCase : Union[str, Any] = 0 with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer: writer.write("""#version: 0.2\n""" ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' """ Please check that the tokenizer is not corrupted!""" ) __UpperCAmelCase : Union[str, Any] = token_index writer.write(""" """.join(UpperCamelCase ) + """\n""" ) index += 1 return vocab_file, merge_file def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __UpperCAmelCase : int = [self.cls_token_id] __UpperCAmelCase : int = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None , UpperCamelCase : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase , token_ids_a=UpperCamelCase , already_has_special_tokens=UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase )) + [1] return [1] + ([0] * len(UpperCamelCase )) + [1, 1] + ([0] * len(UpperCamelCase )) + [1] def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[int] , UpperCamelCase : Optional[List[int]] = None ): '''simple docstring''' __UpperCAmelCase : List[str] = [self.sep_token_id] __UpperCAmelCase : Any = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : List[str]=False , **UpperCamelCase : List[str] ): '''simple docstring''' __UpperCAmelCase : List[str] = kwargs.pop("""add_prefix_space""" , self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(UpperCamelCase ) > 0 and not 
text[0].isspace()): __UpperCAmelCase : Optional[Any] = """ """ + text return (text, kwargs) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Union[Dict[str, EncodedInput], BatchEncoding] , UpperCamelCase : Optional[int] = None , UpperCamelCase : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , UpperCamelCase : Optional[int] = None , UpperCamelCase : Optional[bool] = None , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = super()._pad( encoded_inputs=UpperCamelCase , max_length=UpperCamelCase , padding_strategy=UpperCamelCase , pad_to_multiple_of=UpperCamelCase , return_attention_mask=UpperCamelCase , ) # Load from model defaults if return_attention_mask is None: __UpperCAmelCase : Optional[int] = """attention_mask""" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: __UpperCAmelCase : Union[str, Any] = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. __UpperCAmelCase : Any = len(encoded_inputs["""global_attention_mask"""] ) != len(UpperCamelCase ) if needs_to_be_padded: __UpperCAmelCase : Tuple = len(UpperCamelCase ) - len(encoded_inputs["""global_attention_mask"""] ) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` __UpperCAmelCase : List[Any] = ( encoded_inputs["""global_attention_mask"""] + [-1] * difference ) elif self.padding_side == "left": __UpperCAmelCase : List[Any] = [-1] * difference + encoded_inputs[ """global_attention_mask""" ] else: raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) ) return encoded_inputs
320
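The LED-specific part of the tokenizer above is the _pad override: global_attention_mask must be padded with -1, because 0 already means "local attention" rather than "ignore". That branch reduces to the following sketch, assuming plain Python lists:

def pad_global_attention_mask(global_mask, target_len, padding_side="right"):
    diff = target_len - len(global_mask)
    if diff <= 0:
        return global_mask                  # already long enough, nothing to do
    if padding_side == "right":
        return global_mask + [-1] * diff
    return [-1] * diff + global_mask

print(pad_global_attention_mask([1, 0, 0], 6))  # [1, 0, 0, -1, -1, -1]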
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
320
1
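The try/except OptionalDependencyNotAvailable blocks above all follow one guard pattern: probe for a backend, import the real implementations when it is present, and otherwise expose dummies that fail loudly only when used. Stripped to its core it looks like the sketch below, which assumes diffusers itself is installed whenever torch is; the dummy class is illustrative, not the actual diffusers dummy object.

import importlib.util

def backend_available(name):
    return importlib.util.find_spec(name) is not None

if backend_available("torch"):
    from diffusers.schedulers import DDIMScheduler  # real import path
else:
    class DDIMScheduler:
        """Dummy stand-in: importing always succeeds, using it raises a clear error."""
        def __init__(self, *args, **kwargs):
            raise ImportError("DDIMScheduler requires the PyTorch backend.")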
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
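Both functions above compute the same value; the point of the second is cost. Rewriting c0 + c1*x + ... + cn*x**n as (...(cn*x + c(n-1))*x + ...)*x + c0 needs one multiply and one add per coefficient, instead of recomputing x**i for every term. A quick numeric check on the module's own example:

import math

poly = (0.0, 0.0, 5.0, 9.3, 7.0)    # 5x^2 + 9.3x^3 + 7x^4, lowest degree first
x = 10.0

naive = sum(c * x**i for i, c in enumerate(poly))
acc = 0.0
for coeff in reversed(poly):        # Horner: fold from the highest coefficient down
    acc = acc * x + coeff

assert math.isclose(naive, acc)
print(acc)                          # 79800.0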
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : Optional[int] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ) __UpperCAmelCase : List[Any] = sum(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = [[False for x in range(s + 1 )] for y in range(n + 1 )] for i in range(1 , n + 1 ): __UpperCAmelCase : Any = True for i in range(1 , s + 1 ): __UpperCAmelCase : List[Any] = False for i in range(1 , n + 1 ): for j in range(1 , s + 1 ): __UpperCAmelCase : Optional[int] = dp[i][j - 1] if arr[i - 1] <= j: __UpperCAmelCase : Union[str, Any] = dp[i][j] or dp[i - 1][j - arr[i - 1]] for j in range(int(s / 2 ) , -1 , -1 ): if dp[n][j] is True: __UpperCAmelCase : Optional[int] = s - 2 * j break return diff
320
1
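Note that the recurrence must carry dp[i - 1][j] ("some subset of the first i - 1 items already sums to j"), as in the corrected record above; carrying dp[i][j - 1], as the upstream source did, lets True propagate along each row from dp[i][0] and makes every entry True. With the correct recurrence, the routine finds the largest achievable subset sum j <= s / 2, and s - 2 * j is the minimum partition difference. A small worked example under that reading:

arr = [1, 6, 11, 5]                 # total s = 23, so the best split aims at j <= 11
# {6, 5} sums to 11, the largest achievable j not exceeding s // 2,
# leaving {1, 11} = 12 on the other side: difference 23 - 2 * 11 = 1.
s = sum(arr)
dp = [[False] * (s + 1) for _ in range(len(arr) + 1)]
for i in range(len(arr) + 1):
    dp[i][0] = True                 # the empty subset always sums to 0
for i in range(1, len(arr) + 1):
    for j in range(1, s + 1):
        dp[i][j] = dp[i - 1][j] or (arr[i - 1] <= j and dp[i - 1][j - arr[i - 1]])
print(next(s - 2 * j for j in range(s // 2, -1, -1) if dp[len(arr)][j]))  # 1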
"""simple docstring""" import argparse import torch from transformers import ( SpeechTaConfig, SpeechTaFeatureExtractor, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaProcessor, SpeechTaTokenizer, logging, ) from transformers.tokenization_utils import AddedToken logging.set_verbosity_info() UpperCAmelCase : Optional[int] = logging.get_logger('transformers.models.speecht5') UpperCAmelCase : Union[str, Any] = { 'speech_encoder_prenet.layer_norm': 'speecht5.encoder.prenet.feature_projection.layer_norm', 'speech_encoder_prenet.post_extract_proj': 'speecht5.encoder.prenet.feature_projection.projection', 'speech_encoder_prenet.pos_conv.0': 'speecht5.encoder.prenet.pos_conv_embed.conv', 'speech_encoder_prenet.mask_emb': 'speecht5.encoder.prenet.masked_spec_embed', } UpperCAmelCase : List[Any] = { 'text_encoder_prenet.encoder_prenet.0': 'speecht5.encoder.prenet.embed_tokens', 'text_encoder_prenet.encoder_prenet.1.alpha': 'speecht5.encoder.prenet.encode_positions.alpha', } UpperCAmelCase : Optional[int] = { 'speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0': 'speecht5.decoder.prenet.layers.0', 'speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0': 'speecht5.decoder.prenet.layers.1', 'speech_decoder_prenet.decoder_prenet.0.1': 'speecht5.decoder.prenet.final_layer', 'speech_decoder_prenet.decoder_prenet.1.alpha': 'speecht5.decoder.prenet.encode_positions.alpha', 'speech_decoder_prenet.spkembs_layer.0': 'speecht5.decoder.prenet.speaker_embeds_layer', } UpperCAmelCase : str = { 'speech_decoder_postnet.feat_out': 'speech_decoder_postnet.feat_out', 'speech_decoder_postnet.prob_out': 'speech_decoder_postnet.prob_out', 'speech_decoder_postnet.postnet.postnet.0.0': 'speech_decoder_postnet.layers.0.conv', 'speech_decoder_postnet.postnet.postnet.0.1': 'speech_decoder_postnet.layers.0.batch_norm', 'speech_decoder_postnet.postnet.postnet.1.0': 'speech_decoder_postnet.layers.1.conv', 'speech_decoder_postnet.postnet.postnet.1.1': 'speech_decoder_postnet.layers.1.batch_norm', 'speech_decoder_postnet.postnet.postnet.2.0': 'speech_decoder_postnet.layers.2.conv', 'speech_decoder_postnet.postnet.postnet.2.1': 'speech_decoder_postnet.layers.2.batch_norm', 'speech_decoder_postnet.postnet.postnet.3.0': 'speech_decoder_postnet.layers.3.conv', 'speech_decoder_postnet.postnet.postnet.3.1': 'speech_decoder_postnet.layers.3.batch_norm', 'speech_decoder_postnet.postnet.postnet.4.0': 'speech_decoder_postnet.layers.4.conv', 'speech_decoder_postnet.postnet.postnet.4.1': 'speech_decoder_postnet.layers.4.batch_norm', } UpperCAmelCase : str = { 'text_decoder_prenet.embed_tokens': 'speecht5.decoder.prenet.embed_tokens', } UpperCAmelCase : Dict = { 'text_decoder_postnet.output_projection': 'text_decoder_postnet.lm_head', } UpperCAmelCase : str = { 'encoder.layers.*.self_attn.k_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj', 'encoder.layers.*.self_attn.v_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj', 'encoder.layers.*.self_attn.q_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj', 'encoder.layers.*.self_attn.out_proj': 'speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj', 'encoder.layers.*.self_attn_layer_norm': 'speecht5.encoder.wrapped_encoder.layers.*.layer_norm', 'encoder.layers.*.fc1': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense', 'encoder.layers.*.fc2': 'speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense', 'encoder.layers.*.final_layer_norm': 
'speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'speecht5.encoder.wrapped_encoder.layer_norm', 'encoder.pos_emb.pe_k': 'speecht5.encoder.wrapped_encoder.embed_positions.pe_k', } UpperCAmelCase : Tuple = { 'decoder.layers.*.self_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj', 'decoder.layers.*.self_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj', 'decoder.layers.*.self_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj', 'decoder.layers.*.self_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj', 'decoder.layers.*.self_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm', 'decoder.layers.*.encoder_attn.k_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj', 'decoder.layers.*.encoder_attn.v_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj', 'decoder.layers.*.encoder_attn.q_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj', 'decoder.layers.*.encoder_attn.out_proj': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj', 'decoder.layers.*.encoder_attn_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm', 'decoder.layers.*.fc1': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense', 'decoder.layers.*.fc2': 'speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense', 'decoder.layers.*.final_layer_norm': 'speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm', } UpperCAmelCase : Any = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_TEXT_DECODER_PRENET, **MAPPING_TEXT_DECODER_POSTNET, } UpperCAmelCase : str = { **MAPPING_TEXT_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase : Optional[int] = { **MAPPING_SPEECH_ENCODER_PRENET, **MAPPING_ENCODER, **MAPPING_DECODER, **MAPPING_SPEECH_DECODER_PRENET, **MAPPING_SPEECH_DECODER_POSTNET, } UpperCAmelCase : List[str] = [] UpperCAmelCase : Union[str, Any] = [ 'encoder.version', 'encoder.layers.*.norm_k.weight', 'encoder.layers.*.norm_k.bias', 'decoder.version', 'decoder.layers.*.norm_k.weight', 'decoder.layers.*.norm_k.bias', 'decoder.pos_emb.pe_k', 'speech_encoder_prenet.embed_positions._float_tensor', 'text_decoder_prenet.embed_positions._float_tensor', ] UpperCAmelCase : List[Any] = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'speech_decoder_prenet.*', 'speech_decoder_postnet.*', ] UpperCAmelCase : Dict = IGNORE_KEYS + [ 'encoder.proj', 'speech_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] UpperCAmelCase : Dict = IGNORE_KEYS + [ 'encoder.proj', 'text_encoder_prenet.*', 'text_decoder_prenet.*', 'text_decoder_postnet.*', ] def lowerCamelCase ( _UpperCamelCase : Tuple , _UpperCamelCase : List[Any] , _UpperCamelCase : int , _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[int] ) -> List[str]: '''simple docstring''' for attribute in key.split(""".""" ): __UpperCAmelCase : List[Any] = getattr(_UpperCamelCase , _UpperCamelCase ) if weight_type is not None: __UpperCAmelCase : Union[str, Any] = getattr(_UpperCamelCase , _UpperCamelCase ).shape else: __UpperCAmelCase : Union[str, Any] = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." 
+ weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __UpperCAmelCase : List[str] = value elif weight_type == "weight_g": __UpperCAmelCase : Optional[Any] = value elif weight_type == "weight_v": __UpperCAmelCase : Any = value elif weight_type == "bias": __UpperCAmelCase : Dict = value elif weight_type == "running_mean": __UpperCAmelCase : int = value elif weight_type == "running_var": __UpperCAmelCase : Dict = value elif weight_type == "num_batches_tracked": __UpperCAmelCase : Optional[Any] = value else: __UpperCAmelCase : Optional[int] = value logger.info(f'''{key + ("." + weight_type if weight_type is not None else "")} was initialized from {full_name}.''' ) def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) -> Dict: '''simple docstring''' for key in ignore_keys: if key.endswith(""".*""" ): if name.startswith(key[:-1] ): return True elif ".*." in key: __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = key.split(""".*.""" ) if prefix in name and suffix in name: return True elif key in name: return True return False def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Optional[int] ) -> List[str]: '''simple docstring''' __UpperCAmelCase : List[Any] = [] if task == "s2t": __UpperCAmelCase : Optional[int] = hf_model.speechta.encoder.prenet.feature_encoder __UpperCAmelCase : Union[str, Any] = MAPPING_S2T __UpperCAmelCase : Optional[Any] = IGNORE_KEYS_S2T elif task == "t2s": __UpperCAmelCase : Union[str, Any] = None __UpperCAmelCase : Optional[int] = MAPPING_T2S __UpperCAmelCase : str = IGNORE_KEYS_T2S elif task == "s2s": __UpperCAmelCase : str = hf_model.speechta.encoder.prenet.feature_encoder __UpperCAmelCase : Dict = MAPPING_S2S __UpperCAmelCase : int = IGNORE_KEYS_S2S else: raise ValueError(f'''Unsupported task: {task}''' ) for name, value in fairseq_dict.items(): if should_ignore(_UpperCamelCase , _UpperCamelCase ): logger.info(f'''{name} was ignored''' ) continue __UpperCAmelCase : Dict = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == """group""" , ) __UpperCAmelCase : Optional[int] = True else: for key, mapped_key in MAPPING.items(): # mapped_key = "speecht5." 
+ mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if "*" in key: __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = key.split(""".*.""" ) if prefix in name and suffix in name: __UpperCAmelCase : List[str] = suffix # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: if key in name: __UpperCAmelCase : str = True if "*" in mapped_key: __UpperCAmelCase : Dict = name.split(_UpperCamelCase )[0].split(""".""" )[-2] __UpperCAmelCase : int = mapped_key.replace("""*""" , _UpperCamelCase ) if "weight_g" in name: __UpperCAmelCase : Optional[Any] = """weight_g""" elif "weight_v" in name: __UpperCAmelCase : Optional[int] = """weight_v""" elif "bias" in name: __UpperCAmelCase : Optional[Any] = """bias""" elif "weight" in name: __UpperCAmelCase : Any = """weight""" elif "running_mean" in name: __UpperCAmelCase : Union[str, Any] = """running_mean""" elif "running_var" in name: __UpperCAmelCase : Tuple = """running_var""" elif "num_batches_tracked" in name: __UpperCAmelCase : Any = """num_batches_tracked""" else: __UpperCAmelCase : str = None set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(f'''Unused weights: {unused_weights}''' ) def lowerCamelCase ( _UpperCamelCase : int , _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : List[Any] , _UpperCamelCase : List[Any] ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Optional[Any] = full_name.split("""conv_layers.""" )[-1] __UpperCAmelCase : Union[str, Any] = name.split(""".""" ) __UpperCAmelCase : Optional[Any] = int(items[0] ) __UpperCAmelCase : List[str] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __UpperCAmelCase : Dict = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __UpperCAmelCase : Union[str, Any] = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) __UpperCAmelCase : int = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) __UpperCAmelCase : str = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : 
List[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : Optional[Any]=None , _UpperCamelCase : Any=None , _UpperCamelCase : str=None , ) -> Union[str, Any]: '''simple docstring''' if config_path is not None: __UpperCAmelCase : str = SpeechTaConfig.from_pretrained(_UpperCamelCase ) else: __UpperCAmelCase : int = SpeechTaConfig() if task == "s2t": __UpperCAmelCase : Optional[int] = config.max_text_positions __UpperCAmelCase : str = SpeechTaForSpeechToText(_UpperCamelCase ) elif task == "t2s": __UpperCAmelCase : Optional[Any] = 1_8_7_6 __UpperCAmelCase : Optional[int] = 6_0_0 __UpperCAmelCase : List[str] = config.max_speech_positions __UpperCAmelCase : str = SpeechTaForTextToSpeech(_UpperCamelCase ) elif task == "s2s": __UpperCAmelCase : int = 1_8_7_6 __UpperCAmelCase : int = config.max_speech_positions __UpperCAmelCase : Union[str, Any] = SpeechTaForSpeechToSpeech(_UpperCamelCase ) else: raise ValueError(f'''Unknown task name: {task}''' ) if vocab_path: __UpperCAmelCase : Optional[Any] = SpeechTaTokenizer(_UpperCamelCase , model_max_length=config.max_text_positions ) # Mask token behaves like a normal word, i.e. include the space before it __UpperCAmelCase : int = AddedToken("""<mask>""" , lstrip=_UpperCamelCase , rstrip=_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = mask_token tokenizer.add_special_tokens({"""mask_token""": mask_token} ) tokenizer.add_tokens(["""<ctc_blank>"""] ) __UpperCAmelCase : Optional[int] = SpeechTaFeatureExtractor() __UpperCAmelCase : Tuple = SpeechTaProcessor(tokenizer=_UpperCamelCase , feature_extractor=_UpperCamelCase ) processor.save_pretrained(_UpperCamelCase ) __UpperCAmelCase : Optional[int] = torch.load(_UpperCamelCase ) recursively_load_weights(fairseq_checkpoint["""model"""] , _UpperCamelCase , _UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) if repo_id: print("""Pushing to the hub...""" ) processor.push_to_hub(_UpperCamelCase ) model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : str = argparse.ArgumentParser() parser.add_argument( '--task', default='s2t', type=str, help='Type of the SpeechT5 model you\'d like to convert. Should be one of \'s2t\', \'t2s\', \'s2s\'.', ) parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--vocab_path', default=None, type=str, help='Path to SentencePiece model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.' ) parser.add_argument( '--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.' ) UpperCAmelCase : Dict = parser.parse_args() convert_speechta_checkpoint( args.task, args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.vocab_path, args.push_to_hub, )
320
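The ignore lists above rely on two wildcard forms, and should_ignore is easy to misread, so here it is isolated with examples: "prefix.*" anchors at the start of the key, while "a.*.b" only checks that both fragments occur somewhere in the key, which is intentionally loose.

def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):      # "text_decoder_prenet.*" -> prefix "text_decoder_prenet."
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:                      # plain substring match
            return True
    return False

print(should_ignore("encoder.layers.3.norm_k.weight", ["encoder.layers.*.norm_k.weight"]))  # True
print(should_ignore("encoder.proj.weight", ["encoder.proj"]))                               # True
print(should_ignore("decoder.layers.0.fc1.bias", ["encoder.layers.*.norm_k.weight"]))       # False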
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCAmelCase : Optional[int] = logging.get_logger(__name__) if is_vision_available(): import PIL class lowerCamelCase__ ( A ): """simple docstring""" __a = ["""pixel_values"""] def __init__( self : Tuple , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : bool = True , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : bool = True , UpperCamelCase : Union[int, float] = 1 / 255 , UpperCamelCase : bool = True , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = True , **UpperCamelCase : str , ): '''simple docstring''' super().__init__(**UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = size if size is not None else {"""shortest_edge""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) __UpperCAmelCase : str = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __UpperCAmelCase : str = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase , param_name="""crop_size""" ) __UpperCAmelCase : int = do_resize __UpperCAmelCase : Tuple = size __UpperCAmelCase : Optional[Any] = resample __UpperCAmelCase : Any = do_center_crop __UpperCAmelCase : int = crop_size __UpperCAmelCase : Optional[int] = do_rescale __UpperCAmelCase : List[Any] = rescale_factor __UpperCAmelCase : Tuple = do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __UpperCAmelCase : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD __UpperCAmelCase : List[Any] = do_convert_rgb def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , default_to_square=UpperCamelCase ) if "shortest_edge" not in size: raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __UpperCAmelCase : int = get_resize_output_image_size(UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase ) return resize(UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : np.ndarray , UpperCamelCase : Dict[str, int] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Dict , ): '''simple docstring''' __UpperCAmelCase : Optional[int] = get_size_dict(UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[int, float] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return rescale(UpperCamelCase , scale=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Any , UpperCamelCase : np.ndarray , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Union[float, List[float]] , UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase : Any , ): '''simple docstring''' return normalize(UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase , data_format=UpperCamelCase , **UpperCamelCase ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : ImageInput , UpperCamelCase : bool = None , UpperCamelCase : Dict[str, int] = None , UpperCamelCase : PILImageResampling = None , UpperCamelCase : bool = None , UpperCamelCase : int = None , UpperCamelCase : bool = None , UpperCamelCase : float = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : Optional[Union[float, List[float]]] = None , UpperCamelCase : bool = None , UpperCamelCase : Optional[Union[str, TensorType]] = None , UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : str = do_resize if do_resize is not None else self.do_resize __UpperCAmelCase : Dict = size if size is not None else self.size __UpperCAmelCase : Optional[Any] = get_size_dict(UpperCamelCase , param_name="""size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Dict = resample if resample is not None else self.resample __UpperCAmelCase : List[Any] = do_center_crop if do_center_crop is not None else self.do_center_crop __UpperCAmelCase : str = crop_size if crop_size is not None else self.crop_size __UpperCAmelCase : Dict = get_size_dict(UpperCamelCase , param_name="""crop_size""" , default_to_square=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __UpperCAmelCase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor __UpperCAmelCase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __UpperCAmelCase : Any = image_mean if image_mean is not None else self.image_mean __UpperCAmelCase : Any = image_std if image_std is not None else self.image_std __UpperCAmelCase : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __UpperCAmelCase : List[str] = make_list_of_images(UpperCamelCase ) if not valid_images(UpperCamelCase ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __UpperCAmelCase : int = [convert_to_rgb(UpperCamelCase ) for image in images] # All transformations expect numpy arrays. __UpperCAmelCase : Tuple = [to_numpy_array(UpperCamelCase ) for image in images] if do_resize: __UpperCAmelCase : Optional[int] = [self.resize(image=UpperCamelCase , size=UpperCamelCase , resample=UpperCamelCase ) for image in images] if do_center_crop: __UpperCAmelCase : int = [self.center_crop(image=UpperCamelCase , size=UpperCamelCase ) for image in images] if do_rescale: __UpperCAmelCase : Dict = [self.rescale(image=UpperCamelCase , scale=UpperCamelCase ) for image in images] if do_normalize: __UpperCAmelCase : Optional[Any] = [self.normalize(image=UpperCamelCase , mean=UpperCamelCase , std=UpperCamelCase ) for image in images] __UpperCAmelCase : Any = [to_channel_dimension_format(UpperCamelCase , UpperCamelCase ) for image in images] __UpperCAmelCase : Any = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase , tensor_type=UpperCamelCase )
320
1
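The record above implements a CLIP-style image processor: optional RGB conversion, shortest-edge resize, center crop, rescale by 1/255, per-channel normalization, and channels-first output. Below is a minimal sketch of that chain using PIL and NumPy; the mean/std constants are assumed to match `OPENAI_CLIP_MEAN` / `OPENAI_CLIP_STD`.

```python
# Minimal sketch of the preprocessing chain above, assuming the standard
# OpenAI CLIP constants for OPENAI_CLIP_MEAN / OPENAI_CLIP_STD.
import numpy as np
from PIL import Image

CLIP_MEAN = np.array([0.48145466, 0.4578275, 0.40821073])
CLIP_STD = np.array([0.26862954, 0.26130258, 0.27577711])


def preprocess(image: Image.Image, shortest_edge: int = 224, crop: int = 224) -> np.ndarray:
    image = image.convert("RGB")                  # do_convert_rgb
    w, h = image.size                             # resize so the shorter side == shortest_edge
    scale = shortest_edge / min(w, h)
    image = image.resize((round(w * scale), round(h * scale)), Image.BICUBIC)
    w, h = image.size                             # center crop to (crop, crop)
    left, top = (w - crop) // 2, (h - crop) // 2
    image = image.crop((left, top, left + crop, top + crop))
    pixels = np.asarray(image, dtype=np.float32) / 255.0   # rescale to [0, 1]
    pixels = (pixels - CLIP_MEAN) / CLIP_STD               # per-channel normalize
    return pixels.transpose(2, 0, 1)                       # ChannelDimension.FIRST


print(preprocess(Image.new("RGB", (640, 480))).shape)      # (3, 224, 224)
```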
"""simple docstring""" from collections import deque from math import floor from random import random from time import time class lowerCamelCase__ : """simple docstring""" def __init__( self : str ): '''simple docstring''' __UpperCAmelCase : Dict = {} def lowerCamelCase__ ( self : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[Any]=1 ): '''simple docstring''' if self.graph.get(UpperCamelCase ): if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: __UpperCAmelCase : Optional[int] = [[w, v]] if not self.graph.get(UpperCamelCase ): __UpperCAmelCase : Union[str, Any] = [] def lowerCamelCase__ ( self : Any ): '''simple docstring''' return list(self.graph ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Tuple , UpperCamelCase : Tuple ): '''simple docstring''' if self.graph.get(UpperCamelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[int]=-2 , UpperCamelCase : Union[str, Any]=-1 ): '''simple docstring''' if s == d: return [] __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : List[str] = [] if s == -2: __UpperCAmelCase : List[Any] = list(self.graph )[0] stack.append(UpperCamelCase ) visited.append(UpperCamelCase ) __UpperCAmelCase : str = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase : Optional[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(UpperCamelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase : Union[str, Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(UpperCamelCase ) != 0: __UpperCAmelCase : Dict = stack[len(UpperCamelCase ) - 1] else: __UpperCAmelCase : Optional[int] = ss # check if se have reached the starting point if len(UpperCamelCase ) == 0: return visited def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : List[str]=-1 ): '''simple docstring''' if c == -1: __UpperCAmelCase : Optional[Any] = floor(random() * 10_000 ) + 10 for i in range(UpperCamelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): __UpperCAmelCase : int = floor(random() * c ) + 1 if n != i: self.add_pair(UpperCamelCase , UpperCamelCase , 1 ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[str]=-2 ): '''simple docstring''' __UpperCAmelCase : str = deque() __UpperCAmelCase : Dict = [] if s == -2: __UpperCAmelCase : List[Any] = list(self.graph )[0] d.append(UpperCamelCase ) visited.append(UpperCamelCase ) while d: __UpperCAmelCase : Union[str, Any] = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase__ ( self : str , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Any = 0 for x in self.graph: for y in self.graph[x]: if y[1] == u: count += 1 return count def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase__ ( self : str , UpperCamelCase : int=-2 ): '''simple docstring''' __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : Tuple = [] if s == -2: __UpperCAmelCase : Dict = list(self.graph )[0] stack.append(UpperCamelCase ) visited.append(UpperCamelCase ) __UpperCAmelCase : Dict = s __UpperCAmelCase : Any = [] while True: # check if there is 
any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase : List[str] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase : List[str] = node[1] break # check if all the children are visited if s == ss: sorted_nodes.append(stack.pop() ) if len(UpperCamelCase ) != 0: __UpperCAmelCase : Optional[int] = stack[len(UpperCamelCase ) - 1] else: __UpperCAmelCase : Optional[int] = ss # check if se have reached the starting point if len(UpperCamelCase ) == 0: return sorted_nodes def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [] __UpperCAmelCase : Dict = [] __UpperCAmelCase : List[str] = list(self.graph )[0] stack.append(UpperCamelCase ) visited.append(UpperCamelCase ) __UpperCAmelCase : List[str] = -2 __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : Dict = s __UpperCAmelCase : Tuple = False __UpperCAmelCase : str = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __UpperCAmelCase : Union[str, Any] = len(UpperCamelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase : List[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() __UpperCAmelCase : Tuple = True if len(UpperCamelCase ) != 0: __UpperCAmelCase : Tuple = stack[len(UpperCamelCase ) - 1] else: __UpperCAmelCase : Dict = False indirect_parents.append(UpperCamelCase ) __UpperCAmelCase : int = s __UpperCAmelCase : int = ss # check if se have reached the starting point if len(UpperCamelCase ) == 0: return list(UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : int = [] __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : List[Any] = list(self.graph )[0] stack.append(UpperCamelCase ) visited.append(UpperCamelCase ) __UpperCAmelCase : List[str] = -2 __UpperCAmelCase : Any = [] __UpperCAmelCase : Any = s __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase : List[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __UpperCAmelCase : Dict = len(UpperCamelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase : str = node[1] break # check if all the children are visited if s == ss: stack.pop() __UpperCAmelCase : int = True if len(UpperCamelCase ) != 0: __UpperCAmelCase : Tuple = stack[len(UpperCamelCase ) - 1] else: __UpperCAmelCase : Optional[Any] = False indirect_parents.append(UpperCamelCase ) __UpperCAmelCase : Tuple = s __UpperCAmelCase : List[Any] = ss # check if se have reached the starting point if len(UpperCamelCase ) == 0: return False def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : List[Any]=-2 , UpperCamelCase : Tuple=-1 ): '''simple docstring''' 
__UpperCAmelCase : Dict = time() self.dfs(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = time() return end - begin def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Union[str, Any]=-2 ): '''simple docstring''' __UpperCAmelCase : int = time() self.bfs(UpperCamelCase ) __UpperCAmelCase : str = time() return end - begin class lowerCamelCase__ : """simple docstring""" def __init__( self : Dict ): '''simple docstring''' __UpperCAmelCase : Any = {} def lowerCamelCase__ ( self : Tuple , UpperCamelCase : int , UpperCamelCase : List[str] , UpperCamelCase : List[Any]=1 ): '''simple docstring''' if self.graph.get(UpperCamelCase ): # if there already is a edge if self.graph[u].count([w, v] ) == 0: self.graph[u].append([w, v] ) else: # if u does not exist __UpperCAmelCase : Union[str, Any] = [[w, v]] # add the other way if self.graph.get(UpperCamelCase ): # if there already is a edge if self.graph[v].count([w, u] ) == 0: self.graph[v].append([w, u] ) else: # if u does not exist __UpperCAmelCase : Any = [[w, u]] def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Tuple ): '''simple docstring''' if self.graph.get(UpperCamelCase ): for _ in self.graph[u]: if _[1] == v: self.graph[u].remove(UpperCamelCase ) # the other way round if self.graph.get(UpperCamelCase ): for _ in self.graph[v]: if _[1] == u: self.graph[v].remove(UpperCamelCase ) def lowerCamelCase__ ( self : int , UpperCamelCase : Union[str, Any]=-2 , UpperCamelCase : int=-1 ): '''simple docstring''' if s == d: return [] __UpperCAmelCase : Optional[int] = [] __UpperCAmelCase : Optional[int] = [] if s == -2: __UpperCAmelCase : Optional[int] = list(self.graph )[0] stack.append(UpperCamelCase ) visited.append(UpperCamelCase ) __UpperCAmelCase : List[Any] = s while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase : List[Any] = s for node in self.graph[s]: if visited.count(node[1] ) < 1: if node[1] == d: visited.append(UpperCamelCase ) return visited else: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase : List[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() if len(UpperCamelCase ) != 0: __UpperCAmelCase : List[str] = stack[len(UpperCamelCase ) - 1] else: __UpperCAmelCase : Dict = ss # check if se have reached the starting point if len(UpperCamelCase ) == 0: return visited def lowerCamelCase__ ( self : Dict , UpperCamelCase : int=-1 ): '''simple docstring''' if c == -1: __UpperCAmelCase : Dict = floor(random() * 10_000 ) + 10 for i in range(UpperCamelCase ): # every vertex has max 100 edges for _ in range(floor(random() * 102 ) + 1 ): __UpperCAmelCase : Dict = floor(random() * c ) + 1 if n != i: self.add_pair(UpperCamelCase , UpperCamelCase , 1 ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : Tuple=-2 ): '''simple docstring''' __UpperCAmelCase : int = deque() __UpperCAmelCase : List[Any] = [] if s == -2: __UpperCAmelCase : int = list(self.graph )[0] d.append(UpperCamelCase ) visited.append(UpperCamelCase ) while d: __UpperCAmelCase : int = d.popleft() if len(self.graph[s] ) != 0: for node in self.graph[s]: if visited.count(node[1] ) < 1: d.append(node[1] ) visited.append(node[1] ) return visited def lowerCamelCase__ ( self : Tuple , UpperCamelCase : Tuple ): '''simple docstring''' return len(self.graph[u] ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase : Dict = [] __UpperCAmelCase : int = [] __UpperCAmelCase : Dict = list(self.graph )[0] 
stack.append(UpperCamelCase ) visited.append(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = -2 __UpperCAmelCase : List[str] = [] __UpperCAmelCase : Tuple = s __UpperCAmelCase : Dict = False __UpperCAmelCase : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase : Union[str, Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __UpperCAmelCase : Optional[Any] = len(UpperCamelCase ) - 1 while len_stack >= 0: if stack[len_stack] == node[1]: anticipating_nodes.add(node[1] ) break else: anticipating_nodes.add(stack[len_stack] ) len_stack -= 1 if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase : Any = node[1] break # check if all the children are visited if s == ss: stack.pop() __UpperCAmelCase : Dict = True if len(UpperCamelCase ) != 0: __UpperCAmelCase : int = stack[len(UpperCamelCase ) - 1] else: __UpperCAmelCase : List[str] = False indirect_parents.append(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = s __UpperCAmelCase : Optional[int] = ss # check if se have reached the starting point if len(UpperCamelCase ) == 0: return list(UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Tuple = [] __UpperCAmelCase : str = [] __UpperCAmelCase : Any = list(self.graph )[0] stack.append(UpperCamelCase ) visited.append(UpperCamelCase ) __UpperCAmelCase : str = -2 __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : Optional[Any] = s __UpperCAmelCase : Tuple = False __UpperCAmelCase : Optional[int] = set() while True: # check if there is any non isolated nodes if len(self.graph[s] ) != 0: __UpperCAmelCase : List[Any] = s for node in self.graph[s]: if ( visited.count(node[1] ) > 0 and node[1] != parent and indirect_parents.count(node[1] ) > 0 and not on_the_way_back ): __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) - 1 while len_stack_minus_one >= 0: if stack[len_stack_minus_one] == node[1]: anticipating_nodes.add(node[1] ) break else: return True if visited.count(node[1] ) < 1: stack.append(node[1] ) visited.append(node[1] ) __UpperCAmelCase : Optional[Any] = node[1] break # check if all the children are visited if s == ss: stack.pop() __UpperCAmelCase : Tuple = True if len(UpperCamelCase ) != 0: __UpperCAmelCase : Optional[int] = stack[len(UpperCamelCase ) - 1] else: __UpperCAmelCase : Any = False indirect_parents.append(UpperCamelCase ) __UpperCAmelCase : Dict = s __UpperCAmelCase : Dict = ss # check if se have reached the starting point if len(UpperCamelCase ) == 0: return False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' return list(self.graph ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : Dict=-2 , UpperCamelCase : int=-1 ): '''simple docstring''' __UpperCAmelCase : List[str] = time() self.dfs(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[str] = time() return end - begin def lowerCamelCase__ ( self : str , UpperCamelCase : Optional[Any]=-2 ): '''simple docstring''' __UpperCAmelCase : Tuple = time() self.bfs(UpperCamelCase ) __UpperCAmelCase : Any = time() return end - begin
320
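The record above defines directed and undirected graph classes whose DFS, BFS, topological sort, and cycle detection all use explicit stacks or deques rather than recursion. A compact sketch of the same iterative DFS idea on a plain adjacency list (the classes above store edges as `[weight, vertex]` pairs; weights are dropped here for brevity):

```python
# Iterative depth-first traversal with an explicit stack, the same scheme
# the graph classes above use internally.
def dfs(graph: dict, start):
    visited, stack = [], [start]
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.append(node)
        # Push neighbours in reverse so they are visited in insertion order.
        for neighbour in reversed(graph.get(node, [])):
            if neighbour not in visited:
                stack.append(neighbour)
    return visited


print(dfs({0: [1, 2], 1: [3], 2: [3], 3: []}, 0))  # [0, 1, 3, 2]
```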
"""simple docstring""" from collections.abc import Sequence def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) ) def lowerCamelCase ( _UpperCamelCase : Sequence[float] , _UpperCamelCase : float ) -> float: '''simple docstring''' __UpperCAmelCase : Dict = 0.0 for coeff in reversed(_UpperCamelCase ): __UpperCAmelCase : Any = result * x + coeff return result if __name__ == "__main__": UpperCAmelCase : str = (0.0, 0.0, 5.0, 9.3, 7.0) UpperCAmelCase : str = 10.0 print(evaluate_poly(poly, x)) print(horner(poly, x))
320
1
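Horner's rule factors c0 + c1*x + ... + cn*x^n as c0 + x*(c1 + x*(c2 + ...)), so each coefficient costs one multiply and one add, with no repeated exponentiation. A quick check that the two evaluators in the record agree on the sample polynomial:

```python
# Cross-check of the two evaluators above on the record's sample input:
# naive term-by-term evaluation versus Horner's rule.
poly = (0.0, 0.0, 5.0, 9.3, 7.0)
x = 10.0

naive = sum(c * x**i for i, c in enumerate(poly))

result = 0.0
for coeff in reversed(poly):
    result = result * x + coeff

print(naive, result)                 # both ~= 79800.0
assert abs(naive - result) < 1e-6
```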
"""simple docstring""" import hashlib import unittest from typing import Dict import numpy as np from transformers import ( MODEL_FOR_MASK_GENERATION_MAPPING, TF_MODEL_FOR_MASK_GENERATION_MAPPING, is_vision_available, pipeline, ) from transformers.pipelines import MaskGenerationPipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) if is_vision_available(): from PIL import Image else: class lowerCamelCase__ : """simple docstring""" @staticmethod def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ): '''simple docstring''' pass def lowerCamelCase ( _UpperCamelCase : Image ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = hashlib.mda(image.tobytes() ) return m.hexdigest()[:1_0] def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict: '''simple docstring''' __UpperCAmelCase : Tuple = np.array(_UpperCamelCase ) __UpperCAmelCase : List[Any] = npimg.shape return {"hash": hashimage(_UpperCamelCase ), "shape": shape} @is_pipeline_test @require_vision @require_torch class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" __a = dict( (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) ) __a = dict( (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) ) def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase ) return image_segmenter, [ "./tests/fixtures/tests_samples/COCO/000000039769.png", "./tests/fixtures/tests_samples/COCO/000000039769.png", ] def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' pass @require_tf @unittest.skip("""Image segmentation not implemented in TF""" ) def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' pass @slow @require_torch def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" ) __UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : int = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] # fmt: off self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, {"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967}, {"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993}, {"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909}, {"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879}, {"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, 
"""scores""": 0.9834}, {"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716}, {"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612}, {"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599}, {"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552}, {"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532}, {"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516}, {"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499}, {"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483}, {"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464}, {"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943}, {"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408}, {"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335}, {"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326}, {"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262}, {"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999}, {"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986}, {"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984}, {"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873}, {"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871} ] , ) # fmt: on @require_torch @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : Any = """facebook/sam-vit-huge""" __UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase ) __UpperCAmelCase : int = image_segmenter( """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 ) # Shortening by hashing __UpperCAmelCase : Dict = [] for i, o in enumerate(outputs["""masks"""] ): new_outupt += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}] self.assertEqual( nested_simplify(UpperCamelCase , decimals=4 ) , [ {"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444}, {"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210}, {"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167}, {"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132}, {"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053}, ] , )
320
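The test above compresses each SAM mask into a short fingerprint before comparing against stored values; `hashlib.mda` in the record appears to be a masked `hashlib.md5` (the same digit masking turns `id2label` into `idalabel` elsewhere in this dump). A sketch of the helper under that assumption:

```python
# Sketch of the hash-based snapshot helpers used in the test above: hash an
# image's raw bytes and keep a short prefix so large mask arrays can be
# compared as compact fingerprints. Assumes the intended digest is MD5.
import hashlib

import numpy as np
from PIL import Image


def hashimage(image: Image.Image) -> str:
    return hashlib.md5(image.tobytes()).hexdigest()[:10]


def mask_to_test_readable(mask: Image.Image) -> dict:
    return {"hash": hashimage(mask), "shape": np.array(mask).shape}


img = Image.fromarray(np.zeros((480, 640), dtype=np.uint8))
print(mask_to_test_readable(img))  # {'hash': '...', 'shape': (480, 640)}
```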
"""simple docstring""" import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCAmelCase : Optional[int] = 'platform' import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class lowerCamelCase__ : """simple docstring""" __a = PegasusConfig __a = {} __a = """gelu""" def __init__( self : Optional[Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Dict=True , UpperCamelCase : Union[str, Any]=False , UpperCamelCase : Optional[int]=99 , UpperCamelCase : Union[str, Any]=32 , UpperCamelCase : Union[str, Any]=5 , UpperCamelCase : Any=4 , UpperCamelCase : Tuple=37 , UpperCamelCase : Any=0.1 , UpperCamelCase : Any=0.1 , UpperCamelCase : Union[str, Any]=20 , UpperCamelCase : List[str]=2 , UpperCamelCase : int=1 , UpperCamelCase : Optional[Any]=0 , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = parent __UpperCAmelCase : str = batch_size __UpperCAmelCase : Optional[Any] = seq_length __UpperCAmelCase : Dict = is_training __UpperCAmelCase : Dict = use_labels __UpperCAmelCase : List[Any] = vocab_size __UpperCAmelCase : Dict = hidden_size __UpperCAmelCase : Optional[Any] = num_hidden_layers __UpperCAmelCase : Union[str, Any] = num_attention_heads __UpperCAmelCase : List[Any] = intermediate_size __UpperCAmelCase : Union[str, Any] = hidden_dropout_prob __UpperCAmelCase : List[str] = attention_probs_dropout_prob __UpperCAmelCase : List[Any] = max_position_embeddings __UpperCAmelCase : Any = eos_token_id __UpperCAmelCase : Optional[int] = pad_token_id __UpperCAmelCase : List[str] = bos_token_id def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size ) __UpperCAmelCase : str = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 ) __UpperCAmelCase : Union[str, Any] = np.concatenate([input_ids, eos_tensor] , axis=1 ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : Any = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) __UpperCAmelCase : Any = prepare_pegasus_inputs_dict(UpperCamelCase , UpperCamelCase , UpperCamelCase ) return config, inputs_dict def lowerCamelCase__ ( self : Dict , UpperCamelCase : Optional[Any] , UpperCamelCase : 
Optional[Any] , UpperCamelCase : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : Tuple = model_class_name(UpperCamelCase ) __UpperCAmelCase : List[Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : int = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : Tuple = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Any = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : Union[str, Any] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Any = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Tuple = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Dict = model.decode(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = 20 __UpperCAmelCase : int = model_class_name(UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model.encode(inputs_dict["""input_ids"""] ) __UpperCAmelCase ,__UpperCAmelCase : Dict = ( inputs_dict["""decoder_input_ids"""], inputs_dict["""decoder_attention_mask"""], ) __UpperCAmelCase : int = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) __UpperCAmelCase : int = model.init_cache(decoder_input_ids.shape[0] , UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) __UpperCAmelCase : List[str] = model.decode( decoder_input_ids[:, :-1] , UpperCamelCase , decoder_attention_mask=UpperCamelCase , past_key_values=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" ) __UpperCAmelCase : Optional[int] = model.decode( decoder_input_ids[:, -1:] , UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=UpperCamelCase , decoder_position_ids=UpperCamelCase , ) __UpperCAmelCase : Union[str, Any] = model.decode(UpperCamelCase , UpperCamelCase , decoder_attention_mask=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' ) def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : Optional[Any] , _UpperCamelCase : Tuple , _UpperCamelCase : List[str]=None , _UpperCamelCase : Any=None , ) -> Dict: '''simple docstring''' if 
attention_mask is None: __UpperCAmelCase : Optional[int] = np.not_equal(_UpperCamelCase , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: __UpperCAmelCase : Dict = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) __a = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () __a = True __a = False __a = False __a = False def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = FlaxPegasusModelTester(self ) __UpperCAmelCase : List[str] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(UpperCamelCase , UpperCamelCase , UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : Tuple = self._prepare_for_class(UpperCamelCase , UpperCamelCase ) __UpperCAmelCase : Dict = model_class(UpperCamelCase ) @jax.jit def encode_jitted(UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any]=None , **UpperCamelCase : List[str] ): return model.encode(input_ids=UpperCamelCase , attention_mask=UpperCamelCase ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Tuple = encode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : Optional[int] = encode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): __UpperCAmelCase : int = model_class(UpperCamelCase ) __UpperCAmelCase : int = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] ) __UpperCAmelCase : Any = { """decoder_input_ids""": inputs_dict["""decoder_input_ids"""], """decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""], """encoder_outputs""": encoder_outputs, } @jax.jit def decode_jitted(UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase 
: Optional[int] ): return model.decode( decoder_input_ids=UpperCamelCase , decoder_attention_mask=UpperCamelCase , encoder_outputs=UpperCamelCase , ) with self.subTest("""JIT Enabled""" ): __UpperCAmelCase : Union[str, Any] = decode_jitted(**UpperCamelCase ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): __UpperCAmelCase : str = decode_jitted(**UpperCamelCase ).to_tuple() self.assertEqual(len(UpperCamelCase ) , len(UpperCamelCase ) ) for jitted_output, output in zip(UpperCamelCase , UpperCamelCase ): self.assertEqual(jitted_output.shape , output.shape ) @slow def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for model_class_name in self.all_model_classes: __UpperCAmelCase : Optional[Any] = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=UpperCamelCase ) __UpperCAmelCase : Optional[int] = np.ones((1, 1) ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) @slow def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : Union[str, Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) __UpperCAmelCase : List[Any] = [ """ PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""", """ The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! 
Everywhere we go we smash it up!\" """, ] __UpperCAmelCase : List[str] = [ """California's largest electricity provider has turned off power to hundreds of thousands of customers.""", """Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""", ] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""np""" , truncation=UpperCamelCase , max_length=512 , padding=UpperCamelCase ) __UpperCAmelCase : int = model.generate(**UpperCamelCase , num_beams=2 ).sequences __UpperCAmelCase : str = tokenizer.batch_decode(UpperCamelCase , skip_special_tokens=UpperCamelCase ) assert tgt_text == decoded
320
1
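The cache tests above hinge on two pieces of bookkeeping: padding the decoder attention mask out to the preallocated cache length and building explicit position ids for the prefill step. A NumPy sketch of just that bookkeeping, mirroring the `jnp.concatenate` and `jnp.broadcast_to` calls:

```python
# Sketch of the cache bookkeeping the Pegasus tests verify: extend the
# decoder attention mask to the full cache length with zeros, and build
# position ids for the first len-1 tokens fed in the prefill step.
import numpy as np

max_decoder_length = 20
decoder_attention_mask = np.ones((2, 7), dtype=np.int32)  # batch of 2, 7 tokens

extended_mask = np.concatenate(
    [
        decoder_attention_mask,
        np.zeros((2, max_decoder_length - decoder_attention_mask.shape[1]), dtype=np.int32),
    ],
    axis=-1,
)

position_ids = np.broadcast_to(np.arange(6)[None, :], (2, 6))

print(extended_mask.shape, position_ids.shape)  # (2, 20) (2, 6)
```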
"""simple docstring""" def lowerCamelCase ( _UpperCamelCase : int = 5_0 ) -> int: '''simple docstring''' __UpperCAmelCase : List[Any] = [1] * (length + 1) for row_length in range(3 , length + 1 ): for block_length in range(3 , row_length + 1 ): for block_start in range(row_length - block_length ): ways_number[row_length] += ways_number[ row_length - block_start - block_length - 1 ] ways_number[row_length] += 1 return ways_number[length] if __name__ == "__main__": print(F"{solution() = }")
320
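A brute-force recursive cross-check of the DP above for small lengths, assuming that reading of the problem (either the first square is empty, or a block of length >= 3 sits at the start followed by an empty separator):

```python
# Recursive cross-check of the DP above: count fillings of a row of n unit
# squares with blocks of length >= 3, any two blocks separated by at least
# one empty square.
from functools import lru_cache


@lru_cache(maxsize=None)
def brute(n: int) -> int:
    if n < 3:
        return 1                      # only the all-empty row fits
    total = brute(n - 1)              # first square left empty
    for block in range(3, n + 1):     # block of `block` units at the start
        total += 1 if block == n else brute(n - block - 1)
    return total


assert [brute(n) for n in range(3, 8)] == [2, 4, 7, 11, 17]
```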
"""simple docstring""" import argparse import json import os import numpy as np import PIL import requests import tensorflow.keras.applications.efficientnet as efficientnet import torch from huggingface_hub import hf_hub_download from PIL import Image from tensorflow.keras.preprocessing import image from transformers import ( EfficientNetConfig, EfficientNetForImageClassification, EfficientNetImageProcessor, ) from transformers.utils import logging logging.set_verbosity_info() UpperCAmelCase : List[str] = logging.get_logger(__name__) UpperCAmelCase : List[str] = { 'b0': efficientnet.EfficientNetBa, 'b1': efficientnet.EfficientNetBa, 'b2': efficientnet.EfficientNetBa, 'b3': efficientnet.EfficientNetBa, 'b4': efficientnet.EfficientNetBa, 'b5': efficientnet.EfficientNetBa, 'b6': efficientnet.EfficientNetBa, 'b7': efficientnet.EfficientNetBa, } UpperCAmelCase : List[str] = { 'b0': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.0, 'image_size': 224, 'dropout_rate': 0.2, 'dw_padding': [], }, 'b1': { 'hidden_dim': 1280, 'width_coef': 1.0, 'depth_coef': 1.1, 'image_size': 240, 'dropout_rate': 0.2, 'dw_padding': [16], }, 'b2': { 'hidden_dim': 1408, 'width_coef': 1.1, 'depth_coef': 1.2, 'image_size': 260, 'dropout_rate': 0.3, 'dw_padding': [5, 8, 16], }, 'b3': { 'hidden_dim': 1536, 'width_coef': 1.2, 'depth_coef': 1.4, 'image_size': 300, 'dropout_rate': 0.3, 'dw_padding': [5, 18], }, 'b4': { 'hidden_dim': 1792, 'width_coef': 1.4, 'depth_coef': 1.8, 'image_size': 380, 'dropout_rate': 0.4, 'dw_padding': [6], }, 'b5': { 'hidden_dim': 2048, 'width_coef': 1.6, 'depth_coef': 2.2, 'image_size': 456, 'dropout_rate': 0.4, 'dw_padding': [13, 27], }, 'b6': { 'hidden_dim': 2304, 'width_coef': 1.8, 'depth_coef': 2.6, 'image_size': 528, 'dropout_rate': 0.5, 'dw_padding': [31], }, 'b7': { 'hidden_dim': 2560, 'width_coef': 2.0, 'depth_coef': 3.1, 'image_size': 600, 'dropout_rate': 0.5, 'dw_padding': [18], }, } def lowerCamelCase ( _UpperCamelCase : List[Any] ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : List[Any] = EfficientNetConfig() __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""hidden_dim"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""width_coef"""] __UpperCAmelCase : str = CONFIG_MAP[model_name]["""depth_coef"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""dropout_rate"""] __UpperCAmelCase : Union[str, Any] = CONFIG_MAP[model_name]["""dw_padding"""] __UpperCAmelCase : int = """huggingface/label-files""" __UpperCAmelCase : Optional[int] = """imagenet-1k-id2label.json""" __UpperCAmelCase : str = 1_0_0_0 __UpperCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type="""dataset""" ) , """r""" ) ) __UpperCAmelCase : int = {int(_UpperCamelCase ): v for k, v in idalabel.items()} __UpperCAmelCase : Dict = idalabel __UpperCAmelCase : Tuple = {v: k for k, v in idalabel.items()} return config def lowerCamelCase ( ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : Dict = """http://images.cocodataset.org/val2017/000000039769.jpg""" __UpperCAmelCase : Optional[Any] = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ) return im def lowerCamelCase ( _UpperCamelCase : Any ) -> str: '''simple docstring''' __UpperCAmelCase : Tuple = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : List[str] = EfficientNetImageProcessor( size={"""height""": size, """width""": size} , image_mean=[0.485, 0.456, 0.406] , image_std=[0.47_853_944, 
0.4_732_864, 0.47_434_163] , do_center_crop=_UpperCamelCase , ) return preprocessor def lowerCamelCase ( _UpperCamelCase : Dict ) -> Optional[int]: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = [v.split("""_""" )[0].split("""block""" )[1] for v in original_param_names if v.startswith("""block""" )] __UpperCAmelCase : str = sorted(set(_UpperCamelCase ) ) __UpperCAmelCase : Optional[int] = len(_UpperCamelCase ) __UpperCAmelCase : Any = {b: str(_UpperCamelCase ) for b, i in zip(_UpperCamelCase , range(_UpperCamelCase ) )} __UpperCAmelCase : Any = [] rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") ) rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") ) rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") ) rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") ) rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") ) for b in block_names: __UpperCAmelCase : List[str] = block_name_mapping[b] rename_keys.append((f'''block{b}_expand_conv/kernel:0''', f'''encoder.blocks.{hf_b}.expansion.expand_conv.weight''') ) rename_keys.append((f'''block{b}_expand_bn/gamma:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.weight''') ) rename_keys.append((f'''block{b}_expand_bn/beta:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.bias''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_mean''') ) rename_keys.append( (f'''block{b}_expand_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.expansion.expand_bn.running_var''') ) rename_keys.append( (f'''block{b}_dwconv/depthwise_kernel:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight''') ) rename_keys.append((f'''block{b}_bn/gamma:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight''') ) rename_keys.append((f'''block{b}_bn/beta:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias''') ) rename_keys.append( (f'''block{b}_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean''') ) rename_keys.append( (f'''block{b}_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var''') ) rename_keys.append((f'''block{b}_se_reduce/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.weight''') ) rename_keys.append((f'''block{b}_se_reduce/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.reduce.bias''') ) rename_keys.append((f'''block{b}_se_expand/kernel:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.weight''') ) rename_keys.append((f'''block{b}_se_expand/bias:0''', f'''encoder.blocks.{hf_b}.squeeze_excite.expand.bias''') ) rename_keys.append( (f'''block{b}_project_conv/kernel:0''', f'''encoder.blocks.{hf_b}.projection.project_conv.weight''') ) rename_keys.append((f'''block{b}_project_bn/gamma:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.weight''') ) rename_keys.append((f'''block{b}_project_bn/beta:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.bias''') ) rename_keys.append( (f'''block{b}_project_bn/moving_mean:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_mean''') ) rename_keys.append( (f'''block{b}_project_bn/moving_variance:0''', f'''encoder.blocks.{hf_b}.projection.project_bn.running_var''') ) rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") ) rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") ) 
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") ) rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") ) rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") ) __UpperCAmelCase : Optional[int] = {} for item in rename_keys: if item[0] in original_param_names: __UpperCAmelCase : Optional[Any] = """efficientnet.""" + item[1] __UpperCAmelCase : Tuple = """classifier.weight""" __UpperCAmelCase : Optional[int] = """classifier.bias""" return key_mapping def lowerCamelCase ( _UpperCamelCase : Any , _UpperCamelCase : Dict , _UpperCamelCase : int ) -> Tuple: '''simple docstring''' for key, value in tf_params.items(): if "normalization" in key: continue __UpperCAmelCase : List[Any] = key_mapping[key] if "_conv" in key and "kernel" in key: __UpperCAmelCase : int = torch.from_numpy(_UpperCamelCase ).permute(3 , 2 , 0 , 1 ) elif "depthwise_kernel" in key: __UpperCAmelCase : Optional[Any] = torch.from_numpy(_UpperCamelCase ).permute(2 , 3 , 0 , 1 ) elif "kernel" in key: __UpperCAmelCase : List[str] = torch.from_numpy(np.transpose(_UpperCamelCase ) ) else: __UpperCAmelCase : Tuple = torch.from_numpy(_UpperCamelCase ) # Replace HF parameters with original TF model parameters assert hf_params[hf_key].shape == new_hf_value.shape hf_params[hf_key].copy_(_UpperCamelCase ) @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[str] ) -> Tuple: '''simple docstring''' __UpperCAmelCase : int = model_classes[model_name]( include_top=_UpperCamelCase , weights="""imagenet""" , input_tensor=_UpperCamelCase , input_shape=_UpperCamelCase , pooling=_UpperCamelCase , classes=1_0_0_0 , classifier_activation="""softmax""" , ) __UpperCAmelCase : List[str] = original_model.trainable_variables __UpperCAmelCase : List[Any] = original_model.non_trainable_variables __UpperCAmelCase : Union[str, Any] = {param.name: param.numpy() for param in tf_params} for param in tf_non_train_params: __UpperCAmelCase : int = param.numpy() __UpperCAmelCase : Dict = list(tf_params.keys() ) # Load HuggingFace model __UpperCAmelCase : Optional[Any] = get_efficientnet_config(_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = EfficientNetForImageClassification(_UpperCamelCase ).eval() __UpperCAmelCase : Any = hf_model.state_dict() # Create src-to-dst parameter name mapping dictionary print("""Converting parameters...""" ) __UpperCAmelCase : Tuple = rename_keys(_UpperCamelCase ) replace_params(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Initialize preprocessor and preprocess input image __UpperCAmelCase : List[Any] = convert_image_processor(_UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = preprocessor(images=prepare_img() , return_tensors="""pt""" ) # HF model inference hf_model.eval() with torch.no_grad(): __UpperCAmelCase : Optional[int] = hf_model(**_UpperCamelCase ) __UpperCAmelCase : Any = outputs.logits.detach().numpy() # Original model inference __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Dict = CONFIG_MAP[model_name]["""image_size"""] __UpperCAmelCase : str = prepare_img().resize((image_size, image_size) , resample=PIL.Image.NEAREST ) __UpperCAmelCase : Optional[Any] = image.img_to_array(_UpperCamelCase ) __UpperCAmelCase : Tuple = np.expand_dims(_UpperCamelCase , axis=0 ) __UpperCAmelCase : str = original_model.predict(_UpperCamelCase ) # Check whether original and HF model outputs match -> np.allclose assert 
np.allclose(_UpperCamelCase , _UpperCamelCase , atol=1E-3 ), "The predicted logits are not the same." print("""Model outputs match!""" ) if save_model: # Create folder to save model if not os.path.isdir(_UpperCamelCase ): os.mkdir(_UpperCamelCase ) # Save converted model and image processor hf_model.save_pretrained(_UpperCamelCase ) preprocessor.save_pretrained(_UpperCamelCase ) if push_to_hub: # Push model and image processor to hub print(f'''Pushing converted {model_name} to the hub...''' ) __UpperCAmelCase : List[str] = f'''efficientnet-{model_name}''' preprocessor.push_to_hub(_UpperCamelCase ) hf_model.push_to_hub(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Any = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='b0', type=str, help='Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].', ) parser.add_argument( '--pytorch_dump_folder_path', default='hf_model', type=str, help='Path to the output PyTorch model directory.', ) parser.add_argument('--save_model', action='store_true', help='Save model to local') parser.add_argument('--push_to_hub', action='store_true', help='Push model and image processor to the hub') UpperCAmelCase : Any = parser.parse_args() convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
320
1
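Most of the conversion above reduces to memory-layout swaps between Keras and PyTorch weight tensors. A small sketch of the three permutations the `replace_params` step applies:

```python
# Sketch of the kernel layout swaps in the converter above: TensorFlow
# stores conv kernels as (H, W, in, out) while PyTorch expects
# (out, in, H, W); TF depthwise kernels are (H, W, channels, multiplier)
# versus (channels * multiplier, 1, H, W) in PyTorch.
import numpy as np

tf_kernel = np.zeros((3, 3, 32, 64))             # H, W, in, out
pt_kernel = np.transpose(tf_kernel, (3, 2, 0, 1))
assert pt_kernel.shape == (64, 32, 3, 3)         # out, in, H, W

tf_depthwise = np.zeros((3, 3, 32, 1))           # H, W, channels, multiplier
pt_depthwise = np.transpose(tf_depthwise, (2, 3, 0, 1))
assert pt_depthwise.shape == (32, 1, 3, 3)       # channels, 1, H, W

# Dense kernels only need a 2-D transpose, as np.transpose in the record.
tf_dense = np.zeros((1280, 1000))
assert np.transpose(tf_dense).shape == (1000, 1280)
```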
"""simple docstring""" import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import TransformeraDModel, VQDiffusionPipeline, VQDiffusionScheduler, VQModel from diffusers.pipelines.vq_diffusion.pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings from diffusers.utils import load_numpy, slow, torch_device from diffusers.utils.testing_utils import require_torch_gpu UpperCAmelCase : str = False class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : int ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() @property def lowerCamelCase__ ( self : str ): '''simple docstring''' return 12 @property def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' return 12 @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' return 32 @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : List[Any] = VQModel( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , num_vq_embeddings=self.num_embed , vq_embed_dim=3 , ) return model @property def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) return tokenizer @property def lowerCamelCase__ ( self : str ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : List[Any] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , ) return CLIPTextModel(UpperCamelCase ) @property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' torch.manual_seed(0 ) __UpperCAmelCase : List[Any] = 12 __UpperCAmelCase : int = 12 __UpperCAmelCase : int = { """attention_bias""": True, """cross_attention_dim""": 32, """attention_head_dim""": height * width, """num_attention_heads""": 1, """num_vector_embeds""": self.num_embed, """num_embeds_ada_norm""": self.num_embeds_ada_norm, """norm_num_groups""": 32, """sample_size""": width, """activation_fn""": """geglu-approximate""", } __UpperCAmelCase : Union[str, Any] = TransformeraDModel(**UpperCamelCase ) return model def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = """cpu""" __UpperCAmelCase : Optional[int] = self.dummy_vqvae __UpperCAmelCase : str = self.dummy_text_encoder __UpperCAmelCase : Dict = self.dummy_tokenizer __UpperCAmelCase : Tuple = self.dummy_transformer __UpperCAmelCase : Optional[int] = VQDiffusionScheduler(self.num_embed ) __UpperCAmelCase : Any = LearnedClassifierFreeSamplingEmbeddings(learnable=UpperCamelCase ) __UpperCAmelCase : Tuple = VQDiffusionPipeline( vqvae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , transformer=UpperCamelCase , scheduler=UpperCamelCase , learned_classifier_free_sampling_embeddings=UpperCamelCase , ) __UpperCAmelCase : List[Any] = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : Any = """teddy bear playing in the pool""" __UpperCAmelCase : List[str] = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) __UpperCAmelCase : Dict = 
pipe([prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" ) __UpperCAmelCase : List[str] = output.images __UpperCAmelCase : List[str] = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) __UpperCAmelCase : Optional[Any] = pipe( [prompt] , generator=UpperCamelCase , output_type="""np""" , return_dict=UpperCamelCase , num_inference_steps=2 )[0] __UpperCAmelCase : Dict = image[0, -3:, -3:, -1] __UpperCAmelCase : Any = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __UpperCAmelCase : Tuple = np.array([0.6551, 0.6168, 0.5008, 0.5676, 0.5659, 0.4295, 0.6073, 0.5599, 0.4992] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[str] = """cpu""" __UpperCAmelCase : Dict = self.dummy_vqvae __UpperCAmelCase : int = self.dummy_text_encoder __UpperCAmelCase : str = self.dummy_tokenizer __UpperCAmelCase : Tuple = self.dummy_transformer __UpperCAmelCase : List[str] = VQDiffusionScheduler(self.num_embed ) __UpperCAmelCase : Optional[int] = LearnedClassifierFreeSamplingEmbeddings( learnable=UpperCamelCase , hidden_size=self.text_embedder_hidden_size , length=tokenizer.model_max_length ) __UpperCAmelCase : Optional[Any] = VQDiffusionPipeline( vqvae=UpperCamelCase , text_encoder=UpperCamelCase , tokenizer=UpperCamelCase , transformer=UpperCamelCase , scheduler=UpperCamelCase , learned_classifier_free_sampling_embeddings=UpperCamelCase , ) __UpperCAmelCase : Optional[int] = pipe.to(UpperCamelCase ) pipe.set_progress_bar_config(disable=UpperCamelCase ) __UpperCAmelCase : Optional[int] = """teddy bear playing in the pool""" __UpperCAmelCase : Union[str, Any] = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) __UpperCAmelCase : Dict = pipe([prompt] , generator=UpperCamelCase , num_inference_steps=2 , output_type="""np""" ) __UpperCAmelCase : Union[str, Any] = output.images __UpperCAmelCase : Dict = torch.Generator(device=UpperCamelCase ).manual_seed(0 ) __UpperCAmelCase : int = pipe( [prompt] , generator=UpperCamelCase , output_type="""np""" , return_dict=UpperCamelCase , num_inference_steps=2 )[0] __UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] __UpperCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 24, 24, 3) __UpperCAmelCase : Optional[Any] = np.array([0.6693, 0.6075, 0.4959, 0.5701, 0.5583, 0.4333, 0.6171, 0.5684, 0.4988] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2.0 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 @slow @require_torch_gpu class lowerCamelCase__ ( unittest.TestCase ): """simple docstring""" def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : str = load_numpy( """https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main""" """/vq_diffusion/teddy_bear_pool_classifier_free_sampling.npy""" ) __UpperCAmelCase : Optional[int] = VQDiffusionPipeline.from_pretrained("""microsoft/vq-diffusion-ithq""" ) __UpperCAmelCase : Any = pipeline.to(UpperCamelCase ) pipeline.set_progress_bar_config(disable=UpperCamelCase ) # requires GPU generator for gumbel softmax # don't use GPU generator in tests though __UpperCAmelCase : Union[str, Any] = torch.Generator(device=UpperCamelCase 
).manual_seed(0 ) __UpperCAmelCase : Optional[Any] = pipeline( """teddy bear playing in the pool""" , num_images_per_prompt=1 , generator=UpperCamelCase , output_type="""np""" , ) __UpperCAmelCase : str = output.images[0] assert image.shape == (256, 256, 3) assert np.abs(expected_image - image ).max() < 2.0
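# Hedged usage sketch (not part of the dataset row above): the minimal way to
# drive the pipeline the slow test exercises. The checkpoint and prompt are the
# ones the test uses; running on CUDA is an assumption from @require_torch_gpu.
import torch
from diffusers import VQDiffusionPipeline

pipeline = VQDiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
pipeline = pipeline.to("cuda")

generator = torch.Generator(device="cuda").manual_seed(0)
output = pipeline(
    "teddy bear playing in the pool",
    num_images_per_prompt=1,
    generator=generator,
    output_type="np",
)
image = output.images[0]  # numpy array of shape (256, 256, 3) per the test assertion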
320
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCamelCase__ ( metaclass=A ): """simple docstring""" __a = ["""keras_nlp"""] def __init__( self : str , *UpperCamelCase : List[Any] , **UpperCamelCase : Dict ): '''simple docstring''' requires_backends(self , ["""keras_nlp"""] )
320
1
"""simple docstring""" from typing import Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING UpperCAmelCase : Optional[int] = logging.get_logger(__name__) @add_end_docstrings(A ) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : List[str] , *UpperCamelCase : str , **UpperCamelCase : List[Any] ): '''simple docstring''' super().__init__(*UpperCamelCase , **UpperCamelCase ) self.check_model_type(UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[Any]=None , UpperCamelCase : Tuple=None , UpperCamelCase : List[Any]=None , **UpperCamelCase : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase ,__UpperCAmelCase : Optional[Any] = {}, {} if padding is not None: __UpperCAmelCase : List[Any] = padding if truncation is not None: __UpperCAmelCase : str = truncation if top_k is not None: __UpperCAmelCase : Tuple = top_k return preprocess_params, {}, postprocess_params def __call__( self : Tuple , UpperCamelCase : Union["Image.Image", str] , UpperCamelCase : str = None , **UpperCamelCase : List[str] ): '''simple docstring''' if isinstance(UpperCamelCase , (Image.Image, str) ) and isinstance(UpperCamelCase , UpperCamelCase ): __UpperCAmelCase : Dict = {"""image""": image, """question""": question} else: __UpperCAmelCase : Tuple = image __UpperCAmelCase : Union[str, Any] = super().__call__(UpperCamelCase , **UpperCamelCase ) return results def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] , UpperCamelCase : int=False , UpperCamelCase : int=False ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = load_image(inputs["""image"""] ) __UpperCAmelCase : int = self.tokenizer( inputs["""question"""] , return_tensors=self.framework , padding=UpperCamelCase , truncation=UpperCamelCase ) __UpperCAmelCase : List[Any] = self.image_processor(images=UpperCamelCase , return_tensors=self.framework ) model_inputs.update(UpperCamelCase ) return model_inputs def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : int ): '''simple docstring''' __UpperCAmelCase : int = self.model(**UpperCamelCase ) return model_outputs def lowerCamelCase__ ( self : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple=5 ): '''simple docstring''' if top_k > self.model.config.num_labels: __UpperCAmelCase : Tuple = self.model.config.num_labels if self.framework == "pt": __UpperCAmelCase : Optional[int] = model_outputs.logits.sigmoid()[0] __UpperCAmelCase ,__UpperCAmelCase : int = probs.topk(UpperCamelCase ) else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) __UpperCAmelCase : List[str] = scores.tolist() __UpperCAmelCase : List[str] = ids.tolist() return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(UpperCamelCase , UpperCamelCase )]
320
"""simple docstring""" UpperCAmelCase : Dict = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def lowerCamelCase ( _UpperCamelCase : bytes ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Any = f'''a bytes-like object is required, not \'{data.__class__.__name__}\'''' raise TypeError(_UpperCamelCase ) __UpperCAmelCase : str = """""".join(bin(_UpperCamelCase )[2:].zfill(8 ) for byte in data ) __UpperCAmelCase : int = len(_UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __UpperCAmelCase : Dict = b"""=""" * ((6 - len(_UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(_UpperCamelCase ) % 6) else: __UpperCAmelCase : List[str] = b"""""" # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(_UpperCamelCase ) , 6 ) ).encode() + padding ) def lowerCamelCase ( _UpperCamelCase : str ) -> bytes: '''simple docstring''' if not isinstance(_UpperCamelCase , _UpperCamelCase ) and not isinstance(_UpperCamelCase , _UpperCamelCase ): __UpperCAmelCase : Tuple = ( """argument should be a bytes-like object or ASCII string, """ f'''not \'{encoded_data.__class__.__name__}\'''' ) raise TypeError(_UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(_UpperCamelCase , _UpperCamelCase ): try: __UpperCAmelCase : Optional[Any] = encoded_data.decode("""utf-8""" ) except UnicodeDecodeError: raise ValueError("""base64 encoded data should only contain ASCII characters""" ) __UpperCAmelCase : str = encoded_data.count("""=""" ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(_UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __UpperCAmelCase : List[str] = encoded_data[:-padding] __UpperCAmelCase : int = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __UpperCAmelCase : Optional[Any] = """""".join( bin(B64_CHARSET.index(_UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __UpperCAmelCase : List[Any] = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(_UpperCamelCase ) , 8 ) ] return bytes(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
320
1
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() UpperCAmelCase : Tuple = logging.get_logger(__name__) def lowerCamelCase ( _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : str ) -> str: '''simple docstring''' __UpperCAmelCase : int = WavaVecaForSequenceClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) __UpperCAmelCase : int = downstream_dict["""projector.weight"""] __UpperCAmelCase : Optional[int] = downstream_dict["""projector.bias"""] __UpperCAmelCase : Any = downstream_dict["""model.post_net.linear.weight"""] __UpperCAmelCase : List[Any] = downstream_dict["""model.post_net.linear.bias"""] return model def lowerCamelCase ( _UpperCamelCase : List[str] , _UpperCamelCase : int , _UpperCamelCase : Tuple ) -> Union[str, Any]: '''simple docstring''' __UpperCAmelCase : str = WavaVecaForAudioFrameClassification.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = downstream_dict["""model.linear.weight"""] __UpperCAmelCase : str = downstream_dict["""model.linear.bias"""] return model def lowerCamelCase ( _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[Any] ) -> int: '''simple docstring''' __UpperCAmelCase : Union[str, Any] = WavaVecaForXVector.from_pretrained(_UpperCamelCase , config=_UpperCamelCase ) __UpperCAmelCase : Dict = downstream_dict["""connector.weight"""] __UpperCAmelCase : Any = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __UpperCAmelCase : str = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __UpperCAmelCase : List[Any] = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __UpperCAmelCase : Dict = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __UpperCAmelCase : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __UpperCAmelCase : Any = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __UpperCAmelCase : Union[str, Any] = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __UpperCAmelCase : Any = downstream_dict["""objective.W"""] return model @torch.no_grad() def lowerCamelCase ( _UpperCamelCase : List[Any] , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = torch.load(_UpperCamelCase , map_location="""cpu""" ) __UpperCAmelCase : str = checkpoint["""Downstream"""] __UpperCAmelCase : str = WavaVecaConfig.from_pretrained(_UpperCamelCase ) __UpperCAmelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained( _UpperCamelCase , return_attention_mask=_UpperCamelCase , do_normalize=_UpperCamelCase ) __UpperCAmelCase : Optional[Any] = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __UpperCAmelCase : Tuple = convert_classification(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) elif arch.endswith("""ForAudioFrameClassification""" ): __UpperCAmelCase : List[Any] = convert_diarization(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) elif arch.endswith("""ForXVector""" ): __UpperCAmelCase : Optional[Any] = convert_xvector(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) else: raise 
NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __UpperCAmelCase : Optional[Any] = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(_UpperCamelCase ) hf_model.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": UpperCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') UpperCAmelCase : Dict = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
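# Hedged invocation sketch for the converter above. `convert_saprl_checkpoint`
# is the name the __main__ block calls; every path below is a hypothetical
# placeholder mirroring the argparse flags declared in the sample.
convert_saprl_checkpoint(
    "facebook/wav2vec2-base",     # --base_model_name (illustrative choice)
    "./classifier_config.json",   # --config_path (hypothetical)
    "./s3prl_downstream.ckpt",    # --checkpoint_path (hypothetical)
    "./converted_model",          # --model_dump_path (hypothetical)
)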
320
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor UpperCAmelCase : str = logging.get_logger(__name__) class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Optional[Any] , *UpperCamelCase : str , **UpperCamelCase : List[str] ): '''simple docstring''' warnings.warn( """The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use ChineseCLIPImageProcessor instead.""" , UpperCamelCase , ) super().__init__(*UpperCamelCase , **UpperCamelCase )
320
1
"""simple docstring""" import json import os from collections import Counter import torch import torchvision import torchvision.transforms as transforms from PIL import Image from torch import nn from torch.utils.data import Dataset UpperCAmelCase : str = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)} class lowerCamelCase__ ( nn.Module ): """simple docstring""" def __init__( self : Any , UpperCamelCase : str ): '''simple docstring''' super().__init__() __UpperCAmelCase : Union[str, Any] = torchvision.models.resnetaaa(pretrained=UpperCamelCase ) __UpperCAmelCase : int = list(model.children() )[:-2] __UpperCAmelCase : List[Any] = nn.Sequential(*UpperCamelCase ) __UpperCAmelCase : str = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] ) def lowerCamelCase__ ( self : Dict , UpperCamelCase : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.pool(self.model(UpperCamelCase ) ) __UpperCAmelCase : List[Any] = torch.flatten(UpperCamelCase , start_dim=2 ) __UpperCAmelCase : Any = out.transpose(1 , 2 ).contiguous() return out # BxNx2048 class lowerCamelCase__ ( A ): """simple docstring""" def __init__( self : Tuple , UpperCamelCase : Union[str, Any] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : str ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [json.loads(UpperCamelCase ) for l in open(UpperCamelCase )] __UpperCAmelCase : Any = os.path.dirname(UpperCamelCase ) __UpperCAmelCase : List[str] = tokenizer __UpperCAmelCase : str = labels __UpperCAmelCase : Optional[int] = len(UpperCamelCase ) __UpperCAmelCase : int = max_seq_length __UpperCAmelCase : int = transforms def __len__( self : List[str] ): '''simple docstring''' return len(self.data ) def __getitem__( self : List[str] , UpperCamelCase : Any ): '''simple docstring''' __UpperCAmelCase : Tuple = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] , add_special_tokens=UpperCamelCase ) ) __UpperCAmelCase ,__UpperCAmelCase ,__UpperCAmelCase : Dict = sentence[0], sentence[1:-1], sentence[-1] __UpperCAmelCase : Any = sentence[: self.max_seq_length] __UpperCAmelCase : Tuple = torch.zeros(self.n_classes ) __UpperCAmelCase : str = 1 __UpperCAmelCase : Any = Image.open(os.path.join(self.data_dir , self.data[index]["""img"""] ) ).convert("""RGB""" ) __UpperCAmelCase : Optional[int] = self.transforms(UpperCamelCase ) return { "image_start_token": start_token, "image_end_token": end_token, "sentence": sentence, "image": image, "label": label, } def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : Any = Counter() for row in self.data: label_freqs.update(row["""label"""] ) return label_freqs def lowerCamelCase ( _UpperCamelCase : Union[str, Any] ) -> Any: '''simple docstring''' __UpperCAmelCase : Any = [len(row["""sentence"""] ) for row in batch] __UpperCAmelCase ,__UpperCAmelCase : Union[str, Any] = len(_UpperCamelCase ), max(_UpperCamelCase ) __UpperCAmelCase : Any = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) __UpperCAmelCase : str = torch.zeros(_UpperCamelCase , _UpperCamelCase , dtype=torch.long ) for i_batch, (input_row, length) in enumerate(zip(_UpperCamelCase , _UpperCamelCase ) ): __UpperCAmelCase : List[str] = input_row["""sentence"""] __UpperCAmelCase : Tuple = 1 __UpperCAmelCase : int = torch.stack([row["""image"""] for row in batch] ) __UpperCAmelCase : Optional[Any] = torch.stack([row["""label"""] for row 
in batch] ) __UpperCAmelCase : str = torch.stack([row["""image_start_token"""] for row in batch] ) __UpperCAmelCase : int = torch.stack([row["""image_end_token"""] for row in batch] ) return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor def lowerCamelCase ( ) -> int: '''simple docstring''' return [ "Crime", "Drama", "Thriller", "Action", "Comedy", "Romance", "Documentary", "Short", "Mystery", "History", "Family", "Adventure", "Fantasy", "Sci-Fi", "Western", "Horror", "Sport", "War", "Music", "Musical", "Animation", "Biography", "Film-Noir", ] def lowerCamelCase ( ) -> Optional[Any]: '''simple docstring''' return transforms.Compose( [ transforms.Resize(2_5_6 ), transforms.CenterCrop(2_2_4 ), transforms.ToTensor(), transforms.Normalize( mean=[0.46_777_044, 0.44_531_429, 0.40_661_017] , std=[0.12_221_994, 0.12_145_835, 0.14_380_469] , ), ] )
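# Hedged usage sketch: feeding the dataset class and the batch-collation helper
# above into a DataLoader. `dataset` and `collate` stand in for the mangled
# names in the sample; batch_size is illustrative.
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=8, shuffle=True, collate_fn=collate)
# Unpacking order matches the collate function's return value above.
text, mask, img, img_start_token, img_end_token, labels = next(iter(loader))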
320
"""simple docstring""" import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = LEDTokenizer __a = LEDTokenizerFast __a = True def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' super().setUp() __UpperCAmelCase : Tuple = [ """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """\u0120""", """\u0120l""", """\u0120n""", """\u0120lo""", """\u0120low""", """er""", """\u0120lowest""", """\u0120newer""", """\u0120wider""", """<unk>""", ] __UpperCAmelCase : str = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) ) __UpperCAmelCase : Union[str, Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""] __UpperCAmelCase : Dict = {"""unk_token""": """<unk>"""} __UpperCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __UpperCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase ) ) def lowerCamelCase__ ( self : Tuple , **UpperCamelCase : int ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : Optional[int] , **UpperCamelCase : List[str] ): '''simple docstring''' kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **UpperCamelCase ) def lowerCamelCase__ ( self : str , UpperCamelCase : Any ): '''simple docstring''' return "lower newer", "lower newer" @cached_property def lowerCamelCase__ ( self : Dict ): '''simple docstring''' return LEDTokenizer.from_pretrained("""allenai/led-base-16384""" ) @cached_property def lowerCamelCase__ ( self : str ): '''simple docstring''' return LEDTokenizerFast.from_pretrained("""allenai/led-base-16384""" ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] __UpperCAmelCase : Union[str, Any] = [0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = tokenizer(UpperCamelCase , max_length=len(UpperCamelCase ) , padding=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) __UpperCAmelCase : Optional[Any] = batch.input_ids.tolist()[0] self.assertListEqual(UpperCamelCase , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Optional[int] = ["""A long paragraph for summarization.""", """Another paragraph for summarization."""] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[int] = tokenizer(UpperCamelCase , padding=UpperCamelCase , 
return_tensors="""pt""" ) self.assertIn("""input_ids""" , UpperCamelCase ) self.assertIn("""attention_mask""" , UpperCamelCase ) self.assertNotIn("""labels""" , UpperCamelCase ) self.assertNotIn("""decoder_attention_mask""" , UpperCamelCase ) @require_torch def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : Optional[Any] = [ """Summary of the text.""", """Another summary.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Optional[Any] = tokenizer(text_target=UpperCamelCase , max_length=32 , padding="""max_length""" , return_tensors="""pt""" ) self.assertEqual(32 , targets["""input_ids"""].shape[1] ) @require_torch def lowerCamelCase__ ( self : List[str] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : str = tokenizer( ["""I am a small frog""" * 1_024, """I am a small frog"""] , padding=UpperCamelCase , truncation=UpperCamelCase , return_tensors="""pt""" ) self.assertIsInstance(UpperCamelCase , UpperCamelCase ) self.assertEqual(batch.input_ids.shape , (2, 5_122) ) @require_torch def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ["""A long paragraph for summarization."""] __UpperCAmelCase : int = [ """Summary of the text.""", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Tuple = tokenizer(text_target=UpperCamelCase , return_tensors="""pt""" ) __UpperCAmelCase : Optional[Any] = inputs["""input_ids"""] __UpperCAmelCase : List[str] = targets["""input_ids"""] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: __UpperCAmelCase : Any = ["""Summary of the text.""", """Another summary."""] __UpperCAmelCase : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] __UpperCAmelCase : List[str] = tokenizer(UpperCamelCase , padding=UpperCamelCase ) __UpperCAmelCase : str = [[0] * len(UpperCamelCase ) for x in encoded_output["""input_ids"""]] __UpperCAmelCase : List[Any] = tokenizer.pad(UpperCamelCase ) self.assertSequenceEqual(outputs["""global_attention_mask"""] , UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): __UpperCAmelCase : Optional[Any] = self.rust_tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Tuple = self.tokenizer_class.from_pretrained(UpperCamelCase , **UpperCamelCase ) __UpperCAmelCase : Any = """A, <mask> AllenNLP sentence.""" __UpperCAmelCase : Dict = tokenizer_r.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) __UpperCAmelCase : List[Any] = tokenizer_p.encode_plus(UpperCamelCase , add_special_tokens=UpperCamelCase , return_token_type_ids=UpperCamelCase ) self.assertEqual(sum(tokens_r["""token_type_ids"""] ) , 
sum(tokens_p["""token_type_ids"""] ) ) self.assertEqual( sum(tokens_r["""attention_mask"""] ) / len(tokens_r["""attention_mask"""] ) , sum(tokens_p["""attention_mask"""] ) / len(tokens_p["""attention_mask"""] ) , ) __UpperCAmelCase : Dict = tokenizer_r.convert_ids_to_tokens(tokens_r["""input_ids"""] ) __UpperCAmelCase : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["""input_ids"""] ) self.assertSequenceEqual(tokens_p["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual(tokens_r["""input_ids"""] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] ) self.assertSequenceEqual( UpperCamelCase , ["""<s>""", """A""", """,""", """<mask>""", """ĠAllen""", """N""", """LP""", """Ġsentence""", """.""", """</s>"""] )
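# Hedged usage sketch mirroring the global-attention test above, with the
# checkpoint the tests load.
from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
enc = tokenizer(["Summary of the text.", "Another summary."], padding=True)
# LED also consumes a `global_attention_mask`; all-zero rows mean purely local attention.
enc["global_attention_mask"] = [[0] * len(ids) for ids in enc["input_ids"]]
padded = tokenizer.pad(enc)  # pads global_attention_mask alongside input_ids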
320
1
"""simple docstring""" import re def lowerCamelCase ( _UpperCamelCase : str ) -> str: '''simple docstring''' if len(re.findall("""[ATCG]""" , _UpperCamelCase ) ) != len(_UpperCamelCase ): raise ValueError("""Invalid Strand""" ) return dna.translate(dna.maketrans("""ATCG""" , """TAGC""" ) ) if __name__ == "__main__": import doctest doctest.testmod()
320
"""simple docstring""" from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class lowerCamelCase__ : """simple docstring""" def __init__( self : List[str] , UpperCamelCase : int , UpperCamelCase : List[Any]=13 , UpperCamelCase : Tuple=7 , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=True , UpperCamelCase : Dict=True , UpperCamelCase : List[Any]=True , UpperCamelCase : int=99 , UpperCamelCase : Any=[1, 1, 2] , UpperCamelCase : Optional[Any]=1 , UpperCamelCase : Optional[Any]=32 , UpperCamelCase : Optional[int]=4 , UpperCamelCase : Union[str, Any]=8 , UpperCamelCase : int=37 , UpperCamelCase : Optional[Any]="gelu_new" , UpperCamelCase : Any=0.1 , UpperCamelCase : int=0.1 , UpperCamelCase : int=0.0 , UpperCamelCase : Union[str, Any]=512 , UpperCamelCase : Any=3 , UpperCamelCase : Optional[int]=0.02 , UpperCamelCase : Union[str, Any]=3 , UpperCamelCase : Union[str, Any]=4 , UpperCamelCase : str=None , UpperCamelCase : Tuple=False , ): '''simple docstring''' __UpperCAmelCase : int = parent __UpperCAmelCase : int = batch_size __UpperCAmelCase : str = seq_length __UpperCAmelCase : Optional[Any] = is_training __UpperCAmelCase : Optional[Any] = use_input_mask __UpperCAmelCase : Tuple = use_token_type_ids __UpperCAmelCase : List[str] = use_labels __UpperCAmelCase : Tuple = vocab_size __UpperCAmelCase : Optional[int] = block_sizes __UpperCAmelCase : Optional[Any] = num_decoder_layers __UpperCAmelCase : Union[str, Any] = d_model __UpperCAmelCase : Dict = n_head __UpperCAmelCase : Optional[Any] = d_head __UpperCAmelCase : Dict = d_inner __UpperCAmelCase : Any = hidden_act __UpperCAmelCase : Optional[Any] = hidden_dropout __UpperCAmelCase : List[Any] = attention_dropout __UpperCAmelCase : str = activation_dropout __UpperCAmelCase : Union[str, Any] = max_position_embeddings __UpperCAmelCase : List[Any] = type_vocab_size __UpperCAmelCase : str = 2 __UpperCAmelCase : Optional[Any] = num_labels __UpperCAmelCase : List[Any] = num_choices __UpperCAmelCase : Any = scope __UpperCAmelCase : Dict = initializer_std # Used in the tests to check the size of the first attention layer __UpperCAmelCase : Dict = n_head # Used in the tests to check the size of the first hidden state __UpperCAmelCase : Dict = self.d_model # Used in the tests to check the number of output hidden states/attentions __UpperCAmelCase : Dict = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). 
if not base: __UpperCAmelCase : List[Any] = self.num_hidden_layers + 2 def lowerCamelCase__ ( self : Any ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __UpperCAmelCase : List[str] = None if self.use_input_mask: __UpperCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) __UpperCAmelCase : int = None if self.use_token_type_ids: __UpperCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __UpperCAmelCase : List[Any] = None __UpperCAmelCase : Dict = None __UpperCAmelCase : Optional[Any] = None if self.use_labels: __UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __UpperCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices ) __UpperCAmelCase : str = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def lowerCamelCase__ ( self : Any , UpperCamelCase : Any , UpperCamelCase : Tuple , UpperCamelCase : List[Any] , UpperCamelCase : Any , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : List[str] = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = [input_ids, input_mask] __UpperCAmelCase : Dict = model(UpperCamelCase ) __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) __UpperCAmelCase : Any = False __UpperCAmelCase : Optional[int] = TFFunnelModel(config=UpperCamelCase ) __UpperCAmelCase : List[str] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : List[Any] , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) __UpperCAmelCase : int = [input_ids, input_mask] __UpperCAmelCase : int = model(UpperCamelCase ) __UpperCAmelCase : List[Any] = 
model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) __UpperCAmelCase : List[Any] = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : Union[str, Any] = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) __UpperCAmelCase : int = False __UpperCAmelCase : str = TFFunnelBaseModel(config=UpperCamelCase ) __UpperCAmelCase : str = model(UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def lowerCamelCase__ ( self : Union[str, Any] , UpperCamelCase : Any , UpperCamelCase : Optional[int] , UpperCamelCase : Tuple , UpperCamelCase : int , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Optional[Any] , ): '''simple docstring''' __UpperCAmelCase : Tuple = TFFunnelForPreTraining(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : int , UpperCamelCase : Dict , UpperCamelCase : Dict , UpperCamelCase : Tuple , UpperCamelCase : Tuple , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : int = TFFunnelForMaskedLM(config=UpperCamelCase ) __UpperCAmelCase : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Optional[Any] = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCamelCase__ ( self : Optional[Any] , UpperCamelCase : List[str] , UpperCamelCase : Optional[int] , UpperCamelCase : Optional[int] , UpperCamelCase : str , UpperCamelCase : Optional[int] , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[int] , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_labels __UpperCAmelCase : Optional[Any] = TFFunnelForSequenceClassification(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Tuple = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCamelCase__ ( self : List[Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : str , UpperCamelCase : str , UpperCamelCase : List[Any] , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : int , ): '''simple docstring''' __UpperCAmelCase : Dict = self.num_choices __UpperCAmelCase : str = TFFunnelForMultipleChoice(config=UpperCamelCase ) __UpperCAmelCase : Optional[Any] = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : str = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : int = tf.tile(tf.expand_dims(UpperCamelCase , 1 ) , (1, self.num_choices, 1) ) __UpperCAmelCase : List[str] = { """input_ids""": multiple_choice_inputs_ids, """attention_mask""": multiple_choice_input_mask, """token_type_ids""": multiple_choice_token_type_ids, } __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCamelCase__ ( self : List[str] , UpperCamelCase 
: str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Tuple , UpperCamelCase : Any , UpperCamelCase : List[Any] , UpperCamelCase : int , UpperCamelCase : Any , ): '''simple docstring''' __UpperCAmelCase : int = self.num_labels __UpperCAmelCase : str = TFFunnelForTokenClassification(config=UpperCamelCase ) __UpperCAmelCase : Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : int = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCamelCase__ ( self : str , UpperCamelCase : int , UpperCamelCase : Any , UpperCamelCase : List[str] , UpperCamelCase : str , UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : List[Any] , ): '''simple docstring''' __UpperCAmelCase : Any = TFFunnelForQuestionAnswering(config=UpperCamelCase ) __UpperCAmelCase : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids} __UpperCAmelCase : Any = model(UpperCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCamelCase__ ( self : Optional[int] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.prepare_config_and_inputs() ( ( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) ,( __UpperCAmelCase ) , ) : Dict = config_and_inputs __UpperCAmelCase : int = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_tf class lowerCamelCase__ ( A , A , unittest.TestCase ): """simple docstring""" __a = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __a = ( { """feature-extraction""": (TFFunnelBaseModel, TFFunnelModel), """fill-mask""": TFFunnelForMaskedLM, """question-answering""": TFFunnelForQuestionAnswering, """text-classification""": TFFunnelForSequenceClassification, """token-classification""": TFFunnelForTokenClassification, """zero-shot""": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __a = False __a = False def lowerCamelCase__ ( self : Dict ): '''simple docstring''' __UpperCAmelCase : List[Any] = TFFunnelModelTester(self ) __UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) def lowerCamelCase__ ( self : int ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*UpperCamelCase ) def lowerCamelCase__ ( self : Tuple ): '''simple docstring''' __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase ) def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*UpperCamelCase ) def lowerCamelCase__ ( 
self : str ): '''simple docstring''' __UpperCAmelCase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*UpperCamelCase ) @require_tf class lowerCamelCase__ ( A , unittest.TestCase ): """simple docstring""" __a = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __a = False __a = False def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : List[str] = TFFunnelModelTester(self , base=UpperCamelCase ) __UpperCAmelCase : List[Any] = ConfigTester(self , config_class=UpperCamelCase ) def lowerCamelCase__ ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def lowerCamelCase__ ( self : Union[str, Any] ): '''simple docstring''' __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*UpperCamelCase ) def lowerCamelCase__ ( self : str ): '''simple docstring''' __UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase ) def lowerCamelCase__ ( self : Optional[Any] ): '''simple docstring''' __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase )
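# Hedged sketch: instantiating the small Funnel configuration the tester builds
# and running a forward pass; all sizes mirror the tester defaults above.
import tensorflow as tf
from transformers import FunnelConfig, TFFunnelModel

config = FunnelConfig(
    vocab_size=99,
    block_sizes=[1, 1, 2],
    num_decoder_layers=1,
    d_model=32,
    n_head=4,
    d_head=8,
    d_inner=37,
)
model = TFFunnelModel(config)
input_ids = tf.ones((13, 7), dtype=tf.int32)  # batch_size=13, seq_length=7 as in the tester
outputs = model(input_ids)
print(outputs.last_hidden_state.shape)  # (13, 7, 32)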
320
1