Dataset schema (one row per sample):

  code                     string   lengths 86 to 54.5k
  code_codestyle           int64    0 to 371
  style_context            string   lengths 87 to 49.2k
  style_context_codestyle  int64    0 to 349
  label                    int64    0 to 1
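Each row below lists the five fields in schema order: code, code_codestyle, style_context, style_context_codestyle, label. As a minimal sketch of iterating such a dump with the datasets library, assuming the dump were published as a Hub dataset (the dataset id here is a placeholder, not a real repo):

    # Hypothetical loader sketch: "user/code-style-pairs" is a placeholder id.
    # The column names match the schema above.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")
    for row in ds.select(range(3)):
        print(row["code_codestyle"], row["style_context_codestyle"], row["label"])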
"""simple docstring""" import argparse import os from io import BytesIO from pathlib import Path import requests from clip_retrieval.clip_client import ClipClient from PIL import Image from tqdm import tqdm def lowercase__( __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : Union[str, Any] = 1.5 lowercase_ : str = int(factor * num_class_images ) lowercase_ : Union[str, Any] = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=__SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 ) os.makedirs(F'''{class_data_dir}/images''' , exist_ok=__SCREAMING_SNAKE_CASE ) if len(list(Path(F'''{class_data_dir}/images''' ).iterdir() ) ) >= num_class_images: return while True: lowercase_ : Any = client.query(text=__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) >= factor * num_class_images or num_images > 1E4: break else: lowercase_ : Dict = int(factor * num_images ) lowercase_ : Optional[int] = ClipClient( url='https://knn.laion.ai/knn-service' , indice_name='laion_400m' , num_images=__SCREAMING_SNAKE_CASE , aesthetic_weight=0.1 , ) lowercase_ : Optional[Any] = 0 lowercase_ : Union[str, Any] = 0 lowercase_ : int = tqdm(desc='downloading real regularization images' , total=__SCREAMING_SNAKE_CASE ) with open(F'''{class_data_dir}/caption.txt''' , 'w' ) as fa, open(F'''{class_data_dir}/urls.txt''' , 'w' ) as fa, open( F'''{class_data_dir}/images.txt''' , 'w' ) as fa: while total < num_class_images: lowercase_ : int = class_images[count] count += 1 try: lowercase_ : str = requests.get(images['url'] ) if img.status_code == 2_00: lowercase_ : List[Any] = Image.open(BytesIO(img.content ) ) with open(F'''{class_data_dir}/images/{total}.jpg''' , 'wb' ) as f: f.write(img.content ) fa.write(images['caption'] + '\n' ) fa.write(images['url'] + '\n' ) fa.write(F'''{class_data_dir}/images/{total}.jpg''' + '\n' ) total += 1 pbar.update(1 ) else: continue except Exception: continue return def lowercase__( ): lowercase_ : Tuple = argparse.ArgumentParser('' , add_help=__SCREAMING_SNAKE_CASE ) parser.add_argument('--class_prompt' , help='text prompt to retrieve images' , required=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE ) parser.add_argument('--class_data_dir' , help='path to save images' , required=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE ) parser.add_argument('--num_class_images' , help='number of images to download' , default=2_00 , type=__SCREAMING_SNAKE_CASE ) return parser.parse_args() if __name__ == "__main__": __SCREAMING_SNAKE_CASE =parse_args() retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
321
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
1
"""simple docstring""" from typing import Callable, List, Optional, Union import PIL import torch from transformers import ( CLIPImageProcessor, CLIPSegForImageSegmentation, CLIPSegProcessor, CLIPTextModel, CLIPTokenizer, ) from diffusers import DiffusionPipeline from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL, UNetaDConditionModel from diffusers.pipelines.stable_diffusion import StableDiffusionInpaintPipeline from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler from diffusers.utils import deprecate, is_accelerate_available, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) # pylint: disable=invalid-name class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> List[str]: '''simple docstring''' super().__init__() if hasattr(scheduler.config ,'steps_offset' ) and scheduler.config.steps_offset != 1: lowercase_ : int = ( f'''The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`''' f''' should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure ''' 'to update the config accordingly as leaving `steps_offset` might led to incorrect results' ' in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,' ' it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`' ' file' ) deprecate('steps_offset!=1' ,'1.0.0' ,__UpperCamelCase ,standard_warn=__UpperCamelCase ) lowercase_ : Dict = dict(scheduler.config ) lowercase_ : int = 1 lowercase_ : Any = FrozenDict(__UpperCamelCase ) if hasattr(scheduler.config ,'skip_prk_steps' ) and scheduler.config.skip_prk_steps is False: lowercase_ : List[Any] = ( f'''The configuration file of this scheduler: {scheduler} has not set the configuration''' ' `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make' ' sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to' ' incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face' ' Hub, it would be very nice if you could open a Pull request for the' ' `scheduler/scheduler_config.json` file' ) deprecate('skip_prk_steps not set' ,'1.0.0' ,__UpperCamelCase ,standard_warn=__UpperCamelCase ) lowercase_ : Optional[int] = dict(scheduler.config ) lowercase_ : List[Any] = True lowercase_ : int = FrozenDict(__UpperCamelCase ) if safety_checker is None: logger.warning( f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure''' ' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered' ' results in services or applications open to the public. Both the diffusers team and Hugging Face' ' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling' ' it only for use-cases that involve analyzing network behavior or auditing its results. For more' ' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .' 
) self.register_modules( segmentation_model=__UpperCamelCase ,segmentation_processor=__UpperCamelCase ,vae=__UpperCamelCase ,text_encoder=__UpperCamelCase ,tokenizer=__UpperCamelCase ,unet=__UpperCamelCase ,scheduler=__UpperCamelCase ,safety_checker=__UpperCamelCase ,feature_extractor=__UpperCamelCase ,) def _UpperCAmelCase ( self ,__UpperCamelCase = "auto" ) -> Optional[int]: '''simple docstring''' if slice_size == "auto": # half the attention head size is usually a good trade-off between # speed and memory lowercase_ : List[Any] = self.unet.config.attention_head_dim // 2 self.unet.set_attention_slice(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' self.enable_attention_slicing(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) lowercase_ : Optional[Any] = torch.device('cuda' ) for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.safety_checker]: if cpu_offloaded_model is not None: cpu_offload(__UpperCamelCase ,__UpperCamelCase ) @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _UpperCAmelCase ( self ) -> int: '''simple docstring''' if self.device != torch.device('meta' ) or not hasattr(self.unet ,'_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(__UpperCamelCase ,'_hf_hook' ) and hasattr(module._hf_hook ,'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() def __call__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = 512 ,__UpperCamelCase = 512 ,__UpperCamelCase = 50 ,__UpperCamelCase = 7.5 ,__UpperCamelCase = None ,__UpperCamelCase = 1 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = "pil" ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = 1 ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = self.segmentation_processor( text=[text] ,images=[image] ,padding='max_length' ,return_tensors='pt' ).to(self.device ) lowercase_ : int = self.segmentation_model(**__UpperCamelCase ) lowercase_ : str = torch.sigmoid(outputs.logits ).cpu().detach().unsqueeze(-1 ).numpy() lowercase_ : Optional[int] = self.numpy_to_pil(__UpperCamelCase )[0].resize(image.size ) # Run inpainting pipeline with the generated mask lowercase_ : Optional[int] = StableDiffusionInpaintPipeline( vae=self.vae ,text_encoder=self.text_encoder ,tokenizer=self.tokenizer ,unet=self.unet ,scheduler=self.scheduler ,safety_checker=self.safety_checker ,feature_extractor=self.feature_extractor ,) return inpainting_pipeline( prompt=__UpperCamelCase ,image=__UpperCamelCase ,mask_image=__UpperCamelCase ,height=__UpperCamelCase ,width=__UpperCamelCase ,num_inference_steps=__UpperCamelCase ,guidance_scale=__UpperCamelCase ,negative_prompt=__UpperCamelCase ,num_images_per_prompt=__UpperCamelCase ,eta=__UpperCamelCase ,generator=__UpperCamelCase ,latents=__UpperCamelCase ,output_type=__UpperCamelCase ,return_dict=__UpperCamelCase ,callback=__UpperCamelCase ,callback_steps=__UpperCamelCase ,)
321
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
1
"""simple docstring""" from queue import Queue from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from ..models.auto import AutoTokenizer class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' raise NotImplementedError() def _UpperCAmelCase ( self ) -> int: '''simple docstring''' raise NotImplementedError() class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase = False ,**__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Any = tokenizer lowercase_ : List[Any] = skip_prompt lowercase_ : Optional[Any] = decode_kwargs # variables used in the streaming process lowercase_ : Tuple = [] lowercase_ : List[Any] = 0 lowercase_ : Tuple = True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' if len(value.shape ) > 1 and value.shape[0] > 1: raise ValueError('TextStreamer only supports batch size 1' ) elif len(value.shape ) > 1: lowercase_ : int = value[0] if self.skip_prompt and self.next_tokens_are_prompt: lowercase_ : str = False return # Add the new token to the cache and decodes the entire thing. self.token_cache.extend(value.tolist() ) lowercase_ : Optional[int] = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) # After the symbol for a new line, we flush the cache. if text.endswith('\n' ): lowercase_ : Optional[int] = text[self.print_len :] lowercase_ : str = [] lowercase_ : Tuple = 0 # If the last token is a CJK character, we print the characters. elif len(__UpperCamelCase ) > 0 and self._is_chinese_char(ord(text[-1] ) ): lowercase_ : str = text[self.print_len :] self.print_len += len(__UpperCamelCase ) # Otherwise, prints until the last space char (simple heuristic to avoid printing incomplete words, # which may change with the subsequent token -- there are probably smarter ways to do this!) 
else: lowercase_ : List[Any] = text[self.print_len : text.rfind(' ' ) + 1] self.print_len += len(__UpperCamelCase ) self.on_finalized_text(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if len(self.token_cache ) > 0: lowercase_ : Any = self.tokenizer.decode(self.token_cache ,**self.decode_kwargs ) lowercase_ : Optional[int] = text[self.print_len :] lowercase_ : Union[str, Any] = [] lowercase_ : Dict = 0 else: lowercase_ : Union[str, Any] = '' lowercase_ : Optional[int] = True self.on_finalized_text(__UpperCamelCase ,stream_end=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = False ) -> Union[str, Any]: '''simple docstring''' print(__UpperCamelCase ,flush=__UpperCamelCase ,end='' if not stream_end else None ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' if ( (cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F) or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) # or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) # or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) # or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) # or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) # or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F) or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) # ): # return True return False class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase = False ,__UpperCamelCase = None ,**__UpperCamelCase ) -> str: '''simple docstring''' super().__init__(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Tuple = Queue() lowercase_ : List[str] = None lowercase_ : Union[str, Any] = timeout def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = False ) -> List[str]: '''simple docstring''' self.text_queue.put(__UpperCamelCase ,timeout=self.timeout ) if stream_end: self.text_queue.put(self.stop_signal ,timeout=self.timeout ) def __iter__( self ) -> Union[str, Any]: '''simple docstring''' return self def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = self.text_queue.get(timeout=self.timeout ) if value == self.stop_signal: raise StopIteration() else: return value
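As a usage note for the streamers above (not part of the dataset row): TextStreamer is designed to be passed to generate(), which pushes token ids into it as they are sampled. A small sketch, assuming a recent transformers release and the gpt2 checkpoint:

    from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    inputs = tok(["A list of colors: red, blue"], return_tensors="pt")

    # Decoded text is printed to stdout word by word as tokens are generated.
    streamer = TextStreamer(tok)
    _ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)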
321
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
321
1
"""simple docstring""" import collections import gzip import os import urllib import numpy from tensorflow.python.framework import dtypes, random_seed from tensorflow.python.platform import gfile from tensorflow.python.util.deprecation import deprecated __SCREAMING_SNAKE_CASE =collections.namedtuple("_Datasets", ["train", "validation", "test"]) # CVDF mirror of http://yann.lecun.com/exdb/mnist/ __SCREAMING_SNAKE_CASE ="https://storage.googleapis.com/cvdf-datasets/mnist/" def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : str = numpy.dtype(numpy.uintaa ).newbyteorder('>' ) return numpy.frombuffer(bytestream.read(4 ) , dtype=__SCREAMING_SNAKE_CASE )[0] @deprecated(__SCREAMING_SNAKE_CASE , 'Please use tf.data to implement this functionality.' ) def lowercase__( __SCREAMING_SNAKE_CASE : Any ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=__SCREAMING_SNAKE_CASE ) as bytestream: lowercase_ : Dict = _readaa(__SCREAMING_SNAKE_CASE ) if magic != 20_51: raise ValueError( 'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) ) lowercase_ : int = _readaa(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = _readaa(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = _readaa(__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = bytestream.read(rows * cols * num_images ) lowercase_ : List[Any] = numpy.frombuffer(__SCREAMING_SNAKE_CASE , dtype=numpy.uinta ) lowercase_ : List[Any] = data.reshape(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 1 ) return data @deprecated(__SCREAMING_SNAKE_CASE , 'Please use tf.one_hot on tensors.' ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : List[str] = labels_dense.shape[0] lowercase_ : Union[str, Any] = numpy.arange(__SCREAMING_SNAKE_CASE ) * num_classes lowercase_ : str = numpy.zeros((num_labels, num_classes) ) lowercase_ : int = 1 return labels_one_hot @deprecated(__SCREAMING_SNAKE_CASE , 'Please use tf.data to implement this functionality.' ) def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Dict=10 ): print('Extracting' , f.name ) with gzip.GzipFile(fileobj=__SCREAMING_SNAKE_CASE ) as bytestream: lowercase_ : Optional[int] = _readaa(__SCREAMING_SNAKE_CASE ) if magic != 20_49: raise ValueError( 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) ) lowercase_ : Dict = _readaa(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = bytestream.read(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = numpy.frombuffer(__SCREAMING_SNAKE_CASE , dtype=numpy.uinta ) if one_hot: return _dense_to_one_hot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return labels class UpperCamelCase : @deprecated( __UpperCamelCase ,'Please use alternatives such as official/mnist/_DataSet.py' ' from tensorflow/models.' 
,) def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=dtypes.floataa ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ : List[str] = random_seed.get_seed(__UpperCamelCase ) # If op level seed is not set, use whatever graph level seed is returned numpy.random.seed(seeda if seed is None else seeda ) lowercase_ : Tuple = dtypes.as_dtype(__UpperCamelCase ).base_dtype if dtype not in (dtypes.uinta, dtypes.floataa): raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype ) if fake_data: lowercase_ : str = 1_0000 lowercase_ : Any = one_hot else: assert ( images.shape[0] == labels.shape[0] ), f'''images.shape: {images.shape} labels.shape: {labels.shape}''' lowercase_ : Optional[Any] = images.shape[0] # Convert shape from [num examples, rows, columns, depth] # to [num examples, rows*columns] (assuming depth == 1) if reshape: assert images.shape[3] == 1 lowercase_ : List[str] = images.reshape( images.shape[0] ,images.shape[1] * images.shape[2] ) if dtype == dtypes.floataa: # Convert from [0, 255] -> [0.0, 1.0]. lowercase_ : Optional[int] = images.astype(numpy.floataa ) lowercase_ : int = numpy.multiply(__UpperCamelCase ,1.0 / 255.0 ) lowercase_ : Any = images lowercase_ : str = labels lowercase_ : str = 0 lowercase_ : Dict = 0 @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return self._images @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return self._labels @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return self._num_examples @property def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self._epochs_completed def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=True ) -> Optional[Any]: '''simple docstring''' if fake_data: lowercase_ : str = [1] * 784 lowercase_ : List[str] = [1] + [0] * 9 if self.one_hot else 0 return ( [fake_image for _ in range(__UpperCamelCase )], [fake_label for _ in range(__UpperCamelCase )], ) lowercase_ : List[str] = self._index_in_epoch # Shuffle for the first epoch if self._epochs_completed == 0 and start == 0 and shuffle: lowercase_ : int = numpy.arange(self._num_examples ) numpy.random.shuffle(__UpperCamelCase ) lowercase_ : Dict = self.images[perma] lowercase_ : List[Any] = self.labels[perma] # Go to the next epoch if start + batch_size > self._num_examples: # Finished epoch self._epochs_completed += 1 # Get the rest examples in this epoch lowercase_ : Tuple = self._num_examples - start lowercase_ : Optional[Any] = self._images[start : self._num_examples] lowercase_ : List[str] = self._labels[start : self._num_examples] # Shuffle the data if shuffle: lowercase_ : List[Any] = numpy.arange(self._num_examples ) numpy.random.shuffle(__UpperCamelCase ) lowercase_ : Optional[Any] = self.images[perm] lowercase_ : List[Any] = self.labels[perm] # Start next epoch lowercase_ : str = 0 lowercase_ : List[str] = batch_size - rest_num_examples lowercase_ : int = self._index_in_epoch lowercase_ : Any = self._images[start:end] lowercase_ : Optional[int] = self._labels[start:end] return ( numpy.concatenate((images_rest_part, images_new_part) ,axis=0 ), numpy.concatenate((labels_rest_part, labels_new_part) ,axis=0 ), ) else: self._index_in_epoch += batch_size lowercase_ : str = self._index_in_epoch return self._images[start:end], self._labels[start:end] @deprecated(__SCREAMING_SNAKE_CASE , 
'Please write your own downloading logic.' ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any ): if not gfile.Exists(__SCREAMING_SNAKE_CASE ): gfile.MakeDirs(__SCREAMING_SNAKE_CASE ) lowercase_ : str = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not gfile.Exists(__SCREAMING_SNAKE_CASE ): urllib.request.urlretrieve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # noqa: S310 with gfile.GFile(__SCREAMING_SNAKE_CASE ) as f: lowercase_ : List[str] = f.size() print('Successfully downloaded' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , 'bytes.' ) return filepath @deprecated( __SCREAMING_SNAKE_CASE , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' ) def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : str=dtypes.floataa , __SCREAMING_SNAKE_CASE : Union[str, Any]=True , __SCREAMING_SNAKE_CASE : Tuple=50_00 , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Optional[Any]=DEFAULT_SOURCE_URL , ): if fake_data: def fake(): return _DataSet( [] , [] , fake_data=__SCREAMING_SNAKE_CASE , one_hot=__SCREAMING_SNAKE_CASE , dtype=__SCREAMING_SNAKE_CASE , seed=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = fake() lowercase_ : List[str] = fake() lowercase_ : List[str] = fake() return _Datasets(train=__SCREAMING_SNAKE_CASE , validation=__SCREAMING_SNAKE_CASE , test=__SCREAMING_SNAKE_CASE ) if not source_url: # empty string check lowercase_ : Optional[int] = DEFAULT_SOURCE_URL lowercase_ : Dict = 'train-images-idx3-ubyte.gz' lowercase_ : Tuple = 'train-labels-idx1-ubyte.gz' lowercase_ : List[Any] = 't10k-images-idx3-ubyte.gz' lowercase_ : Optional[Any] = 't10k-labels-idx1-ubyte.gz' lowercase_ : Dict = _maybe_download( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , source_url + train_images_file ) with gfile.Open(__SCREAMING_SNAKE_CASE , 'rb' ) as f: lowercase_ : Optional[int] = _extract_images(__SCREAMING_SNAKE_CASE ) lowercase_ : int = _maybe_download( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , source_url + train_labels_file ) with gfile.Open(__SCREAMING_SNAKE_CASE , 'rb' ) as f: lowercase_ : Tuple = _extract_labels(__SCREAMING_SNAKE_CASE , one_hot=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = _maybe_download( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , source_url + test_images_file ) with gfile.Open(__SCREAMING_SNAKE_CASE , 'rb' ) as f: lowercase_ : Tuple = _extract_images(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = _maybe_download( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , source_url + test_labels_file ) with gfile.Open(__SCREAMING_SNAKE_CASE , 'rb' ) as f: lowercase_ : Optional[Any] = _extract_labels(__SCREAMING_SNAKE_CASE , one_hot=__SCREAMING_SNAKE_CASE ) if not 0 <= validation_size <= len(__SCREAMING_SNAKE_CASE ): lowercase_ : str = ( 'Validation size should be between 0 and ' F'''{len(__SCREAMING_SNAKE_CASE )}. 
Received: {validation_size}.''' ) raise ValueError(__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = train_images[:validation_size] lowercase_ : List[Any] = train_labels[:validation_size] lowercase_ : Union[str, Any] = train_images[validation_size:] lowercase_ : Union[str, Any] = train_labels[validation_size:] lowercase_ : Optional[int] = {'dtype': dtype, 'reshape': reshape, 'seed': seed} lowercase_ : Any = _DataSet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = _DataSet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = _DataSet(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) return _Datasets(train=__SCREAMING_SNAKE_CASE , validation=__SCREAMING_SNAKE_CASE , test=__SCREAMING_SNAKE_CASE )
321
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
1
"""simple docstring""" from __future__ import annotations from random import random class UpperCamelCase : def __init__( self ,__UpperCamelCase = None ) -> Optional[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = value lowercase_ : str = random() lowercase_ : Node | None = None lowercase_ : Node | None = None def __repr__( self ) -> str: '''simple docstring''' from pprint import pformat if self.left is None and self.right is None: return f'''\'{self.value}: {self.prior:.5}\'''' else: return pformat( {f'''{self.value}: {self.prior:.5}''': (self.left, self.right)} ,indent=1 ) def __str__( self ) -> str: '''simple docstring''' lowercase_ : List[str] = str(self.value ) + ' ' lowercase_ : Any = str(self.left or '' ) lowercase_ : List[str] = str(self.right or '' ) return value + left + right def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : int ): if root is None: # None tree is split into 2 Nones return None, None elif root.value is None: return None, None else: if value < root.value: lowercase_ , lowercase_ : Any = split(root.left , __SCREAMING_SNAKE_CASE ) return left, root else: lowercase_ , lowercase_ : Union[str, Any] = split(root.right , __SCREAMING_SNAKE_CASE ) return root, right def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : Node | None ): if (not left) or (not right): # If one node is None, return the other return left or right elif left.prior < right.prior: lowercase_ : int = merge(left.right , __SCREAMING_SNAKE_CASE ) return left else: lowercase_ : List[str] = merge(__SCREAMING_SNAKE_CASE , right.left ) return right def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Any = Node(__SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : Any = split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return merge(merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : int ): lowercase_ , lowercase_ : Optional[Any] = split(__SCREAMING_SNAKE_CASE , value - 1 ) lowercase_ , lowercase_ : str = split(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return merge(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Node | None ): if not root: # None return else: inorder(root.left ) print(root.value , end=',' ) inorder(root.right ) def lowercase__( __SCREAMING_SNAKE_CASE : Node | None , __SCREAMING_SNAKE_CASE : str ): for arg in args.split(): if arg[0] == "+": lowercase_ : Union[str, Any] = insert(__SCREAMING_SNAKE_CASE , int(arg[1:] ) ) elif arg[0] == "-": lowercase_ : Union[str, Any] = erase(__SCREAMING_SNAKE_CASE , int(arg[1:] ) ) else: print('Unknown command' ) return root def lowercase__( ): lowercase_ : Optional[int] = None print( 'enter numbers to create a tree, + value to add value into treap, ' '- value to erase all nodes with value. \'q\' to quit. ' ) lowercase_ : Dict = input() while args != "q": lowercase_ : Union[str, Any] = interact_treap(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = input() print('good by!' ) if __name__ == "__main__": import doctest doctest.testmod() main()
321
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
1
"""simple docstring""" from __future__ import annotations import math class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Tuple = size # approximate the overall size of segment tree with given value lowercase_ : Dict = [0 for i in range(0 ,4 * size )] # create array to store lazy update lowercase_ : Optional[int] = [0 for i in range(0 ,4 * size )] lowercase_ : List[str] = [0 for i in range(0 ,4 * size )] # flag for lazy update def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' return idx * 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> None: '''simple docstring''' if left_element == right_element: lowercase_ : Any = a[left_element - 1] else: lowercase_ : str = (left_element + right_element) // 2 self.build(self.left(__UpperCamelCase ) ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) self.build(self.right(__UpperCamelCase ) ,mid + 1 ,__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = max( self.segment_tree[self.left(__UpperCamelCase )] ,self.segment_tree[self.right(__UpperCamelCase )] ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' if self.flag[idx] is True: lowercase_ : Dict = self.lazy[idx] lowercase_ : List[str] = False if left_element != right_element: lowercase_ : Any = self.lazy[idx] lowercase_ : List[str] = self.lazy[idx] lowercase_ : Dict = True lowercase_ : Optional[Any] = True if right_element < a or left_element > b: return True if left_element >= a and right_element <= b: lowercase_ : Tuple = val if left_element != right_element: lowercase_ : Optional[int] = val lowercase_ : Union[str, Any] = val lowercase_ : Tuple = True lowercase_ : Optional[Any] = True return True lowercase_ : Optional[Any] = (left_element + right_element) // 2 self.update(self.left(__UpperCamelCase ) ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) self.update(self.right(__UpperCamelCase ) ,mid + 1 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = max( self.segment_tree[self.left(__UpperCamelCase )] ,self.segment_tree[self.right(__UpperCamelCase )] ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int | float: '''simple docstring''' if self.flag[idx] is True: lowercase_ : Optional[Any] = self.lazy[idx] lowercase_ : Optional[int] = False if left_element != right_element: lowercase_ : Tuple = self.lazy[idx] lowercase_ : Tuple = self.lazy[idx] lowercase_ : Union[str, Any] = True lowercase_ : Optional[Any] = True if right_element < a or left_element > b: return -math.inf if left_element >= a and right_element <= b: return self.segment_tree[idx] lowercase_ : List[Any] = (left_element + right_element) // 2 lowercase_ : Tuple = self.query(self.left(__UpperCamelCase ) ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Union[str, Any] = self.query(self.right(__UpperCamelCase ) ,mid + 1 ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) return max(__UpperCamelCase ,__UpperCamelCase ) def __str__( self ) -> str: '''simple docstring''' return str([self.query(1 ,1 ,self.size ,__UpperCamelCase ,__UpperCamelCase ) for i in range(1 
,self.size + 1 )] ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =[1, 2, -4, 7, 3, -5, 6, 11, -20, 9, 14, 15, 5, 2, -8] __SCREAMING_SNAKE_CASE =15 __SCREAMING_SNAKE_CASE =SegmentTree(size) segt.build(1, 1, size, A) print(segt.query(1, 1, size, 4, 6)) print(segt.query(1, 1, size, 7, 11)) print(segt.query(1, 1, size, 7, 12)) segt.update(1, 1, size, 1, 3, 111) print(segt.query(1, 1, size, 1, 15)) segt.update(1, 1, size, 7, 8, 235) print(segt)
321
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import VivitImageProcessor class UpperCamelCase ( unittest.TestCase ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=3 ,__UpperCamelCase=10 ,__UpperCamelCase=18 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=None ,) -> Optional[int]: '''simple docstring''' lowercase_ : Any = size if size is not None else {'shortest_edge': 18} lowercase_ : Optional[Any] = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowercase_ : List[str] = parent lowercase_ : List[str] = batch_size lowercase_ : Optional[int] = num_channels lowercase_ : Union[str, Any] = num_frames lowercase_ : Union[str, Any] = image_size lowercase_ : List[str] = min_resolution lowercase_ : int = max_resolution lowercase_ : Union[str, Any] = do_resize lowercase_ : Optional[int] = size lowercase_ : str = do_normalize lowercase_ : Tuple = image_mean lowercase_ : Any = image_std lowercase_ : Tuple = crop_size def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = VivitImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = VivitImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase ,'image_mean' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'image_std' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_normalize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_resize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_center_crop' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'size' ) ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Any = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} ) lowercase_ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PIL videos lowercase_ : Any = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ) for video in video_inputs: self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase ) 
self.assertIsInstance(video[0] ,Image.Image ) # Test not batched input lowercase_ : Any = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : List[Any] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : Optional[int] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase ) for video in video_inputs: self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase ) self.assertIsInstance(video[0] ,np.ndarray ) # Test not batched input lowercase_ : Tuple = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : List[str] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : Optional[int] = prepare_video_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase ) for video in video_inputs: self.assertIsInstance(__UpperCamelCase ,__UpperCamelCase ) self.assertIsInstance(video[0] ,torch.Tensor ) # Test not batched input lowercase_ : Optional[int] = image_processing(video_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( 1, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : Tuple = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_videos.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_frames, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,)
321
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
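A short usage sketch for the tokenizer above, assuming network access to the Hugging Face Hub for the facebook/blenderbot-3B checkpoint named in the file:

    from transformers import BlenderbotTokenizerFast

    tokenizer = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
    enc = tokenizer("Hello, how are you?")
    # build_inputs_with_special_tokens appends only the EOS token,
    # so every encoding ends with tokenizer.eos_token_id.
    print(enc["input_ids"][-1] == tokenizer.eos_token_id)  # True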
321
1
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
1
"""simple docstring""" from bisect import bisect from itertools import accumulate def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Optional[int] = sorted(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , key=lambda __SCREAMING_SNAKE_CASE : x[0] / x[1] , reverse=__SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : int = [i[0] for i in r], [i[1] for i in r] lowercase_ : str = list(accumulate(__SCREAMING_SNAKE_CASE ) ) lowercase_ : Union[str, Any] = bisect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return ( 0 if k == 0 else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k]) if k != n else sum(vl[:k] ) ) if __name__ == "__main__": import doctest doctest.testmod()
321
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "google/pix2struct-textcaps-base": ( "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json" ), } class UpperCamelCase ( lowercase_ ): lowercase = 'pix2struct_text_model' lowercase = ['past_key_values'] lowercase = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self ,__UpperCamelCase=5_0244 ,__UpperCamelCase=768 ,__UpperCamelCase=64 ,__UpperCamelCase=2048 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase=32 ,__UpperCamelCase=128 ,__UpperCamelCase=0.1 ,__UpperCamelCase=1e-6 ,__UpperCamelCase=1.0 ,__UpperCamelCase="gelu_new" ,__UpperCamelCase=0 ,__UpperCamelCase=False ,__UpperCamelCase=0 ,__UpperCamelCase=1 ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[Any] = vocab_size lowercase_ : List[Any] = hidden_size lowercase_ : Optional[int] = d_kv lowercase_ : Union[str, Any] = d_ff lowercase_ : Tuple = num_layers lowercase_ : Tuple = num_heads lowercase_ : int = relative_attention_num_buckets lowercase_ : Tuple = relative_attention_max_distance lowercase_ : str = dropout_rate lowercase_ : Optional[Any] = layer_norm_epsilon lowercase_ : Union[str, Any] = initializer_factor lowercase_ : Dict = use_cache lowercase_ : Optional[int] = eos_token_id lowercase_ : Dict = decoder_start_token_id # for backwards compatibility lowercase_ : List[str] = dense_act_fn super().__init__( pad_token_id=__UpperCamelCase ,eos_token_id=__UpperCamelCase ,decoder_start_token_id=__UpperCamelCase ,tie_word_embeddings=__UpperCamelCase ,is_decoder=__UpperCamelCase ,**__UpperCamelCase ,) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__UpperCamelCase ) lowercase_ , lowercase_ : Optional[int] = cls.get_config_dict(__UpperCamelCase ,**__UpperCamelCase ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get('model_type' ) == "pix2struct": lowercase_ : Optional[int] = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__UpperCamelCase ,**__UpperCamelCase ) class UpperCamelCase ( lowercase_ ): lowercase = 'pix2struct_vision_model' def __init__( self ,__UpperCamelCase=768 ,__UpperCamelCase=768 ,__UpperCamelCase=2048 ,__UpperCamelCase=64 ,__UpperCamelCase=12 ,__UpperCamelCase=12 ,__UpperCamelCase="gelu_new" ,__UpperCamelCase=1e-6 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=1e-10 ,__UpperCamelCase=1.0 ,__UpperCamelCase=4096 ,__UpperCamelCase=32 ,__UpperCamelCase=128 ,**__UpperCamelCase ,) -> int: '''simple docstring''' super().__init__(**__UpperCamelCase ) lowercase_ : Any = hidden_size lowercase_ : Optional[int] = patch_embed_hidden_size lowercase_ : List[str] = d_ff lowercase_ : int = dropout_rate lowercase_ : Union[str, Any] = num_hidden_layers lowercase_ : Optional[int] = num_attention_heads lowercase_ : Optional[Any] = initializer_range lowercase_ : Tuple = initializer_factor lowercase_ : int = attention_dropout lowercase_ : Optional[Any] = layer_norm_eps lowercase_ : Optional[Any] = dense_act_fn lowercase_ : List[str] = seq_len lowercase_ : str = relative_attention_num_buckets lowercase_ : List[Any] = relative_attention_max_distance lowercase_ : Optional[Any] = d_kv @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,**__UpperCamelCase ) -> "PretrainedConfig": '''simple docstring''' cls._set_token_in_kwargs(__UpperCamelCase ) lowercase_ , lowercase_ : List[str] = cls.get_config_dict(__UpperCamelCase ,**__UpperCamelCase ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get('model_type' ) == "pix2struct": lowercase_ : Tuple = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls ,'model_type' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(__UpperCamelCase ,**__UpperCamelCase ) class UpperCamelCase ( lowercase_ ): lowercase = 'pix2struct' lowercase = True def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=1.0 ,__UpperCamelCase=0.02 ,__UpperCamelCase=False ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__(tie_word_embeddings=__UpperCamelCase ,is_encoder_decoder=__UpperCamelCase ,**__UpperCamelCase ) if text_config is None: lowercase_ : Optional[int] = {} logger.info('text_config is None. Initializing the Pix2StructTextConfig with default values.' ) if vision_config is None: lowercase_ : List[Any] = {} logger.info('vision_config is None. Initializing the Pix2StructVisionConfig with default values.' 
) lowercase_ : str = PixaStructTextConfig(**__UpperCamelCase ) lowercase_ : Tuple = PixaStructVisionConfig(**__UpperCamelCase ) lowercase_ : List[Any] = self.text_config.decoder_start_token_id lowercase_ : List[str] = self.text_config.pad_token_id lowercase_ : Any = self.text_config.eos_token_id lowercase_ : Tuple = initializer_factor lowercase_ : Any = initializer_range lowercase_ : List[str] = self.initializer_range lowercase_ : Optional[int] = self.initializer_range lowercase_ : List[Any] = is_vqa @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> Tuple: '''simple docstring''' return cls(text_config=text_config.to_dict() ,vision_config=vision_config.to_dict() ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = copy.deepcopy(self.__dict__ ) lowercase_ : Any = self.text_config.to_dict() lowercase_ : Dict = self.vision_config.to_dict() lowercase_ : Optional[int] = self.__class__.model_type return output
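A minimal composition sketch, assuming the three classes above correspond to the Pix2StructTextConfig, Pix2StructVisionConfig and Pix2StructConfig that transformers exports (the constructor kwargs mirror the signatures above):

from transformers import Pix2StructConfig, Pix2StructTextConfig, Pix2StructVisionConfig

text_config = Pix2StructTextConfig(num_layers=2, num_heads=2, hidden_size=64)
vision_config = Pix2StructVisionConfig(num_hidden_layers=2, num_attention_heads=2, hidden_size=64)
config = Pix2StructConfig.from_text_vision_configs(text_config, vision_config)  # the classmethod defined above
assert config.model_type == "pix2struct"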
321
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
321
1
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
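Outside these fixtures the processor is normally loaded from a published checkpoint; a hedged sketch (the checkpoint name is an assumption, not taken from this test file):

import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")  # assumed checkpoint
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=[image], return_tensors="np")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values'], as asserted above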
321
1
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
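Outside the test harness the same export path can be driven directly; a minimal sketch of the two calls under test, with the argument order exercised above (framework, model, output path, opset):

from pathlib import Path

from transformers.convert_graph_to_onnx import convert, quantize

output = Path("onnx/bert-base-cased.onnx")
convert("pt", "bert-base-cased", output, 12)  # export with opset 12, as in the tests
quantized_path = quantize(output)             # returns the path of the quantized model
print(output.stat().st_size, quantized_path.stat().st_size)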
321
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
1
"""simple docstring""" import unittest from pathlib import Path from tempfile import TemporaryDirectory from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available from transformers.models.gpta.tokenization_gpta import GPTaTokenizer from transformers.testing_utils import require_keras_nlp, require_tf, slow if is_tf_available(): import tensorflow as tf if is_keras_nlp_available(): from transformers.models.gpta import TFGPTaTokenizer __SCREAMING_SNAKE_CASE =["gpt2"] __SCREAMING_SNAKE_CASE ="gpt2" if is_tf_available(): class UpperCamelCase ( tf.Module ): def __init__( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' super().__init__() lowercase_ : List[str] = tokenizer lowercase_ : List[str] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TFGPTaLMHeadModel.from_config(__UpperCamelCase ) @tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='text' ),) ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[Any] = self.tokenizer(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenized['input_ids'].to_tensor() lowercase_ : List[str] = tf.cast(input_ids_dense > 0 ,tf.intaa ) # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN]) lowercase_ : List[str] = self.model(input_ids=__UpperCamelCase ,attention_mask=__UpperCamelCase )['logits'] return outputs @require_tf @require_keras_nlp class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() lowercase_ : Tuple = [GPTaTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in (TOKENIZER_CHECKPOINTS)] lowercase_ : Optional[Any] = [TFGPTaTokenizer.from_pretrained(__UpperCamelCase ) for checkpoint in TOKENIZER_CHECKPOINTS] assert len(self.tokenizers ) == len(self.tf_tokenizers ) lowercase_ : List[str] = [ 'This is a straightforward English test sentence.', 'This one has some weird characters\rto\nsee\r\nif those\u00E9break things.', 'Now we\'re going to add some Chinese: 一 二 三 一二三', 'And some much more rare Chinese: 齉 堃 齉堃', 'Je vais aussi écrire en français pour tester les accents', 'Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ', ] lowercase_ : str = list(zip(self.test_sentences ,self.test_sentences[::-1] ) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ): for test_inputs in self.test_sentences: lowercase_ : str = tokenizer([test_inputs] ,return_tensors='tf' ) lowercase_ : Optional[int] = tf_tokenizer([test_inputs] ) for key in python_outputs.keys(): # convert them to numpy to avoid messing with ragged tensors lowercase_ : List[str] = python_outputs[key].numpy() lowercase_ : str = tf_outputs[key].numpy() self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) ) self.assertTrue(tf.reduce_all(tf.cast(__UpperCamelCase ,tf.intaa ) == tf_outputs_values ) ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase_ : List[str] = tf.function(__UpperCamelCase ) for test_inputs in self.test_sentences: lowercase_ : Dict = tf.constant(__UpperCamelCase ) lowercase_ : Union[str, Any] = compiled_tokenizer(__UpperCamelCase ) lowercase_ : Dict = tf_tokenizer(__UpperCamelCase ) for key in eager_outputs.keys(): self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple 
docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase_ : Union[str, Any] = ModelToSave(tokenizer=__UpperCamelCase ) lowercase_ : Optional[Any] = tf.convert_to_tensor([self.test_sentences[0]] ) lowercase_ : Tuple = model.serving(__UpperCamelCase ) # Build model with some sample inputs with TemporaryDirectory() as tempdir: lowercase_ : List[Any] = Path(__UpperCamelCase ) / 'saved.model' tf.saved_model.save(__UpperCamelCase ,__UpperCamelCase ,signatures={'serving_default': model.serving} ) lowercase_ : Optional[Any] = tf.saved_model.load(__UpperCamelCase ) lowercase_ : Any = loaded_model.signatures['serving_default'](__UpperCamelCase )['output_0'] # We may see small differences because the loaded model is compiled, so we need an epsilon for the test self.assertTrue(tf.reduce_all(out == loaded_output ) ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: lowercase_ : List[str] = tf.convert_to_tensor([self.test_sentences[0]] ) lowercase_ : Any = tf_tokenizer(__UpperCamelCase ) # Build model with some sample inputs lowercase_ : Union[str, Any] = tf_tokenizer.get_config() lowercase_ : Dict = TFGPTaTokenizer.from_config(__UpperCamelCase ) lowercase_ : Union[str, Any] = model_from_config(__UpperCamelCase ) for key in from_config_output.keys(): self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) ) @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' for tf_tokenizer in self.tf_tokenizers: # for the test to run lowercase_ : Union[str, Any] = 12_3123 for max_length in [3, 5, 1024]: lowercase_ : str = tf.convert_to_tensor([self.test_sentences[0]] ) lowercase_ : List[Any] = tf_tokenizer(__UpperCamelCase ,max_length=__UpperCamelCase ) lowercase_ : Any = out['input_ids'].numpy().shape[1] assert out_length == max_length
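The point of the in-graph tokenizer exercised above is that it runs inside tf.function and SavedModel graphs; a minimal sketch (requires keras-nlp, per the test's decorators):

import tensorflow as tf
from transformers.models.gpt2 import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")

@tf.function
def tokenize(texts):
    return tf_tokenizer(texts)

out = tokenize(tf.constant(["This is a straightforward English test sentence."]))
print(out["input_ids"])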
321
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
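The checkpoint used above is the encoder half of a seq2seq setup; a hedged sketch of the pairing described in the BertGeneration docs (the flags are assumptions, not taken from this test):

from transformers import BertGenerationDecoder, BertGenerationEncoder, EncoderDecoderModel

encoder = BertGenerationEncoder.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
decoder = BertGenerationDecoder.from_pretrained(
    "google/bert_for_seq_generation_L-24_bbc_encoder", add_cross_attention=True, is_decoder=True
)
model = EncoderDecoderModel(encoder=encoder, decoder=decoder)  # ties the two halves together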
321
1
"""simple docstring""" from typing import List, Optional, Union from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "huggingface/time-series-transformer-tourism-monthly": ( "https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json" ), # See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer } class UpperCamelCase ( lowercase_ ): lowercase = 'time_series_transformer' lowercase = { 'hidden_size': 'd_model', 'num_attention_heads': 'encoder_attention_heads', 'num_hidden_layers': 'encoder_layers', } def __init__( self ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = "student_t" ,__UpperCamelCase = "nll" ,__UpperCamelCase = 1 ,__UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] ,__UpperCamelCase = "mean" ,__UpperCamelCase = 0 ,__UpperCamelCase = 0 ,__UpperCamelCase = 0 ,__UpperCamelCase = 0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = 32 ,__UpperCamelCase = 32 ,__UpperCamelCase = 2 ,__UpperCamelCase = 2 ,__UpperCamelCase = 2 ,__UpperCamelCase = 2 ,__UpperCamelCase = True ,__UpperCamelCase = "gelu" ,__UpperCamelCase = 64 ,__UpperCamelCase = 0.1 ,__UpperCamelCase = 0.1 ,__UpperCamelCase = 0.1 ,__UpperCamelCase = 0.1 ,__UpperCamelCase = 0.1 ,__UpperCamelCase = 100 ,__UpperCamelCase = 0.02 ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : Any = prediction_length lowercase_ : Dict = context_length or prediction_length lowercase_ : Dict = distribution_output lowercase_ : List[str] = loss lowercase_ : Optional[int] = input_size lowercase_ : Dict = num_time_features lowercase_ : Tuple = lags_sequence lowercase_ : int = scaling lowercase_ : int = num_dynamic_real_features lowercase_ : Union[str, Any] = num_static_real_features lowercase_ : int = num_static_categorical_features if cardinality and num_static_categorical_features > 0: if len(__UpperCamelCase ) != num_static_categorical_features: raise ValueError( 'The cardinality should be a list of the same length as `num_static_categorical_features`' ) lowercase_ : Optional[int] = cardinality else: lowercase_ : str = [0] if embedding_dimension and num_static_categorical_features > 0: if len(__UpperCamelCase ) != num_static_categorical_features: raise ValueError( 'The embedding dimension should be a list of the same length as `num_static_categorical_features`' ) lowercase_ : List[Any] = embedding_dimension else: lowercase_ : Optional[int] = [min(50 ,(cat + 1) // 2 ) for cat in self.cardinality] lowercase_ : Dict = num_parallel_samples # Transformer architecture configuration lowercase_ : List[str] = input_size * len(__UpperCamelCase ) + self._number_of_features lowercase_ : Optional[int] = d_model lowercase_ : Dict = encoder_attention_heads lowercase_ : Any = decoder_attention_heads lowercase_ : List[Any] = encoder_ffn_dim lowercase_ : Union[str, Any] = decoder_ffn_dim lowercase_ : Optional[Any] = encoder_layers lowercase_ : Optional[Any] = decoder_layers lowercase_ : Any = dropout lowercase_ : Tuple = attention_dropout lowercase_ : Union[str, Any] = activation_dropout lowercase_ : int = encoder_layerdrop lowercase_ : List[Any] = decoder_layerdrop lowercase_ : List[Any] = activation_function lowercase_ : List[str] = init_std lowercase_ : Tuple = use_cache super().__init__(is_encoder_decoder=__UpperCamelCase ,**__UpperCamelCase ) @property def _UpperCAmelCase ( self ) -> int: 
'''simple docstring''' return ( sum(self.embedding_dimension ) + self.num_dynamic_real_features + self.num_time_features + self.num_static_real_features + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features )
321
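A hedged usage sketch for the configuration file above; it assumes the public transformers class (TimeSeriesTransformerConfig) that the obfuscated names stand in for, with illustrative hyperparameters:

# Hedged sketch: assumes the public transformers API corresponding to the
# obfuscated configuration class above.
from transformers import TimeSeriesTransformerConfig

config = TimeSeriesTransformerConfig(
    prediction_length=24,                 # forecast horizon
    context_length=48,                    # conditioning window
    num_time_features=2,                  # e.g. age + month-of-year features
    lags_sequence=[1, 2, 3, 4, 5, 6, 7],  # same default as above
)
# feature_size is the derived value computed in __init__:
# input_size * len(lags_sequence) + _number_of_features
print(config.d_model, config.feature_size)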
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
321
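For reference, a hedged sketch of the legacy transformers.convert_graph_to_onnx entry points the tests above exercise; the model name and output path are illustrative:

# Sketch of the conversion path driven by the tests above (legacy
# transformers.convert_graph_to_onnx API; opset 12 as in the tests).
from pathlib import Path

from transformers.convert_graph_to_onnx import convert, quantize

output = Path("onnx/bert-base-cased.onnx")  # parent folder must be new/empty
convert(framework="pt", model="bert-base-cased", output=output, opset=12)

quantized_path = quantize(output)  # dynamic quantization; returns the new path
print(quantized_path)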
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __SCREAMING_SNAKE_CASE ={ "configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MegatronBertForCausalLM", "MegatronBertForMaskedLM", "MegatronBertForMultipleChoice", "MegatronBertForNextSentencePrediction", "MegatronBertForPreTraining", "MegatronBertForQuestionAnswering", "MegatronBertForSequenceClassification", "MegatronBertForTokenClassification", "MegatronBertModel", "MegatronBertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_megatron_bert import ( MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, MegatronBertForCausalLM, MegatronBertForMaskedLM, MegatronBertForMultipleChoice, MegatronBertForNextSentencePrediction, MegatronBertForPreTraining, MegatronBertForQuestionAnswering, MegatronBertForSequenceClassification, MegatronBertForTokenClassification, MegatronBertModel, MegatronBertPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
321
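The lazy-module wiring above is what lets end users import from the package root; a small, purely illustrative sketch with an assumed tiny config:

# The _LazyModule above defers the heavy torch-backed imports until first
# access, so the usual top-level import just works.
from transformers import MegatronBertConfig, MegatronBertModel

tiny = MegatronBertConfig(hidden_size=64, num_hidden_layers=2,
                          num_attention_heads=2, intermediate_size=128)
model = MegatronBertModel(tiny)
print(sum(p.numel() for p in model.parameters()))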
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
321
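The constraint under test is meant for constrained beam search; a hedged sketch with gpt2, where the checkpoint and phrasings are purely illustrative:

# DisjunctiveConstraint forces any one of several token sequences to appear
# in the generated output. Hedged example; requires num_beams > 1.
from transformers import AutoModelForCausalLM, AutoTokenizer
from transformers.generation import DisjunctiveConstraint

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# Accept either surface form of the same word.
alternatives = [
    tokenizer(" screams", add_special_tokens=False).input_ids,
    tokenizer(" screaming", add_special_tokens=False).input_ids,
]
constraint = DisjunctiveConstraint(alternatives)

inputs = tokenizer("The baby", return_tensors="pt")
out = model.generate(**inputs, constraints=[constraint],
                     num_beams=5, max_new_tokens=20, no_repeat_ngram_size=2)
print(tokenizer.decode(out[0], skip_special_tokens=True))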
1
"""simple docstring""" import os import re import shutil import sys import tempfile import unittest import black __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import check_copies # noqa: E402 # This is the reference code that will be used in the tests. # If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated. __SCREAMING_SNAKE_CASE =" \"\"\"\n Output class for the scheduler's step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n" class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Any = tempfile.mkdtemp() os.makedirs(os.path.join(self.diffusers_dir ,'schedulers/' ) ) lowercase_ : List[Any] = self.diffusers_dir shutil.copy( os.path.join(__UpperCamelCase ,'src/diffusers/schedulers/scheduling_ddpm.py' ) ,os.path.join(self.diffusers_dir ,'schedulers/scheduling_ddpm.py' ) ,) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : str = 'src/diffusers' shutil.rmtree(self.diffusers_dir ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code if overwrite_result is not None: lowercase_ : Any = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result lowercase_ : Union[str, Any] = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=119 ) lowercase_ : List[Any] = black.format_str(__UpperCamelCase ,mode=__UpperCamelCase ) lowercase_ : Dict = os.path.join(self.diffusers_dir ,'new_code.py' ) with open(__UpperCamelCase ,'w' ,newline='\n' ) as f: f.write(__UpperCamelCase ) if overwrite_result is None: self.assertTrue(len(check_copies.is_copy_consistent(__UpperCamelCase ) ) == 0 ) else: check_copies.is_copy_consistent(f.name ,overwrite=__UpperCamelCase ) with open(__UpperCamelCase ,'r' ) as f: self.assertTrue(f.read() ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = check_copies.find_code_in_diffusers('schedulers.scheduling_ddpm.DDPMSchedulerOutput' ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,REFERENCE_CODE + '\n' ,) # With no empty line at the end self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput' ,'DDPMSchedulerOutput' ,__UpperCamelCase ,) # Copy consistency with rename self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,re.sub('DDPM' ,'Test' ,__UpperCamelCase ) ,) # Copy consistency 
with a really long name lowercase_ : List[Any] = 'TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason' self.check_copy_consistency( f'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}''' ,f'''{long_class_name}SchedulerOutput''' ,re.sub('Bert' ,__UpperCamelCase ,__UpperCamelCase ) ,) # Copy consistency with overwrite self.check_copy_consistency( '# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test' ,'TestSchedulerOutput' ,__UpperCamelCase ,overwrite_result=re.sub('DDPM' ,'Test' ,__UpperCamelCase ) ,)
321
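A hedged sketch of driving the checker that these tests exercise; it assumes the repo-local utils/check_copies.py is on sys.path, as the test sets up above:

# Both functions are the ones called in the tests above.
import check_copies

# Look up the reference block registered at a dotted path inside diffusers.
reference = check_copies.find_code_in_diffusers(
    "schedulers.scheduling_ddpm.DDPMSchedulerOutput"
)

# Report (or, with overwrite=True, rewrite) stale "# Copied from" blocks.
diffs = check_copies.is_copy_consistent("src/diffusers/schedulers/scheduling_ddpm.py")
print(len(reference), diffs)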
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
321
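A hedged invocation sketch for the converter above; the flag names come from its own argparse block, while the script filename and all paths are placeholders:

# Illustrative only: the filename and checkpoint/config/output paths are
# assumptions, the flags are the ones the script defines.
import subprocess

subprocess.run(
    [
        "python", "convert_bert_token_dropping_original_tf2_checkpoint_to_pytorch.py",
        "--tf_checkpoint_path", "./token_dropping_checkpoint",
        "--bert_config_file", "./bert_config.json",
        "--pytorch_dump_path", "./pytorch_model_out",
    ],
    check=True,
)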
1
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __SCREAMING_SNAKE_CASE ="base_with_context" def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) ) lowercase_ : Optional[Any] = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) for lyr_num, lyr in enumerate(model.encoders ): lowercase_ : Dict = weights[F'''layers_{lyr_num}'''] lowercase_ : Optional[int] = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) lowercase_ : Any = ly_weight['attention'] lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : List[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : Dict = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : List[Any] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str ): lowercase_ : Dict = nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) for lyr_num, lyr in enumerate(model.encoders ): lowercase_ : Any = weights[F'''layers_{lyr_num}'''] lowercase_ : List[str] = ly_weight['attention'] lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : Dict = nn.Parameter( torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : Union[str, Any] = nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : Any = nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) ) lowercase_ : Optional[Any] = 
nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) ) lowercase_ : str = nn.Parameter( torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=__SCREAMING_SNAKE_CASE ) lowercase_ : str = nn.Parameter( torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) ) for lyr_num, lyr in enumerate(model.decoders ): lowercase_ : Optional[int] = weights[F'''layers_{lyr_num}'''] lowercase_ : Union[str, Any] = nn.Parameter( torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) ) lowercase_ : Tuple = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) ) lowercase_ : int = ly_weight['self_attention'] lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Optional[int] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : List[str] = ly_weight['MultiHeadDotProductAttention_0'] lowercase_ : int = nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) ) lowercase_ : Optional[Any] = nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) ) lowercase_ : int = nn.Parameter( torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) ) lowercase_ : str = nn.Parameter( torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) ) lowercase_ : str = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) ) lowercase_ : Tuple = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) ) lowercase_ : Any = nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) ) lowercase_ : List[str] = nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) ) return model def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : int = checkpoints.load_tax_checkpoint(args.checkpoint_path ) lowercase_ : Union[str, Any] = jnp.tree_util.tree_map(onp.array , __SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = [ 'from __gin__ import dynamic_registration', 'from music_spectrogram_diffusion.models.diffusion import diffusion_utils', 'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0', 'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()', ] lowercase_ : str = os.path.join(args.checkpoint_path , '..' 
, 'config.gin' ) lowercase_ : Union[str, Any] = inference.parse_training_gin_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Dict = inference.InferenceModel(args.checkpoint_path , __SCREAMING_SNAKE_CASE ) lowercase_ : str = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' ) lowercase_ : str = SpectrogramNotesEncoder( max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) lowercase_ : Optional[Any] = SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , ) lowercase_ : List[str] = TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) lowercase_ : Dict = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Dict = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Any = load_decoder(ta_checkpoint['target']['decoder'] , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' ) lowercase_ : Dict = SpectrogramDiffusionPipeline( notes_encoder=__SCREAMING_SNAKE_CASE , continuous_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE , scheduler=__SCREAMING_SNAKE_CASE , melgan=__SCREAMING_SNAKE_CASE , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument( "--checkpoint_path", default=F"{MODEL}/checkpoint_500000", type=str, required=False, help="Path to the original jax model checkpoint.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() main(args)
321
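Likewise, a hedged invocation sketch for this converter; the flags come from its argparse block, and the script filename and checkpoint path are placeholders:

# Illustrative only; --save defaults to True in the script above.
import subprocess

subprocess.run(
    [
        "python", "convert_music_spectrogram_to_diffusers.py",
        "--checkpoint_path", "./base_with_context/checkpoint_500000",
        "--output_path", "./spectrogram_diffusion_pipeline",
    ],
    check=True,
)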
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
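A guarded call around the scraper above (using the covid_stats name its own __main__ line uses); the XPath is tied to worldometers' current markup, so it is worth failing soft:

# Hedged sketch: a markup change upstream surfaces as a TypeError when the
# namedtuple gets the wrong number of fields, or as a network error.
import requests

try:
    stats = covid_stats()
    print(f"cases={stats.cases}, deaths={stats.deaths}, recovered={stats.recovered}")
except (requests.RequestException, TypeError) as err:
    print(f"could not scrape live counters: {err}")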
1
"""simple docstring""" __SCREAMING_SNAKE_CASE =8.31_44_62 # Unit - J mol-1 K-1 def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if moles < 0 or kelvin < 0 or volume < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if moles < 0 or kelvin < 0 or pressure < 0: raise ValueError('Invalid inputs. Enter positive value.' ) return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure if __name__ == "__main__": from doctest import testmod testmod()
321
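A worked check of the helpers above via PV = nRT, computing the molar volume at 300 K and standard pressure:

# Same constant and rearrangement as the second helper (V = nRT / P).
R = 8.314462  # J mol^-1 K^-1
n, T, P = 1.0, 300.0, 101_325.0
volume = n * R * T / P
print(f"{volume:.5f} m^3")  # ~0.02462 m^3, i.e. ~24.6 L, the familiar molar volume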
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
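A quick look at what the re-exports above provide, via the legacy GLUE processor registry; "mrpc" is one of the standard task keys:

# Hedged sketch of the legacy data-processor API re-exported above.
from transformers.data.processors.glue import glue_processors, glue_tasks_num_labels

processor = glue_processors["mrpc"]()
print(processor.get_labels(), glue_tasks_num_labels["mrpc"])  # ['0', '1'] 2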
1
"""simple docstring""" __SCREAMING_SNAKE_CASE =range(2, 20 + 1) __SCREAMING_SNAKE_CASE =[10**k for k in range(ks[-1] + 1)] __SCREAMING_SNAKE_CASE ={} def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : List[Any] = sum(a_i[j] for j in range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ) ) lowercase_ : Union[str, Any] = sum(a_i[j] * base[j] for j in range(min(len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ) ) lowercase_ , lowercase_ : Optional[int] = 0, 0 lowercase_ : Dict = n - i lowercase_ : List[str] = memo.get(__SCREAMING_SNAKE_CASE ) if sub_memo is not None: lowercase_ : Any = sub_memo.get(__SCREAMING_SNAKE_CASE ) if jumps is not None and len(__SCREAMING_SNAKE_CASE ) > 0: # find and make the largest jump without going over lowercase_ : List[str] = -1 for _k in range(len(__SCREAMING_SNAKE_CASE ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: lowercase_ : List[Any] = _k break if max_jump >= 0: lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = jumps[max_jump] # since the difference between jumps is cached, add c lowercase_ : List[Any] = diff + c for j in range(min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ) ): lowercase_ , lowercase_ : Optional[Any] = divmod(__SCREAMING_SNAKE_CASE , 10 ) if new_c > 0: add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: lowercase_ : List[Any] = [] else: lowercase_ : Optional[int] = {c: []} lowercase_ : int = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps lowercase_ , lowercase_ : Union[str, Any] = next_term(__SCREAMING_SNAKE_CASE , k - 1 , i + dn , __SCREAMING_SNAKE_CASE ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead lowercase_ , lowercase_ : List[Any] = compute(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , i + dn , __SCREAMING_SNAKE_CASE ) diff += _diff dn += terms_jumped lowercase_ : List[Any] = sub_memo[c] # keep jumps sorted by # of terms skipped lowercase_ : str = 0 while j < len(__SCREAMING_SNAKE_CASE ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(__SCREAMING_SNAKE_CASE , (diff, dn, k) ) return (diff, dn) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Tuple ): if i >= n: return 0, i if k > len(__SCREAMING_SNAKE_CASE ): a_i.extend([0 for _ in range(k - len(__SCREAMING_SNAKE_CASE ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) lowercase_ : Tuple = i lowercase_ , lowercase_ , lowercase_ : int = 0, 0, 0 for j in range(len(__SCREAMING_SNAKE_CASE ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 lowercase_ : Dict = ds_c + ds_b diff += addend lowercase_ : Tuple = 0 for j in range(__SCREAMING_SNAKE_CASE ): lowercase_ : Tuple = a_i[j] + addend lowercase_ , lowercase_ : Tuple = divmod(__SCREAMING_SNAKE_CASE , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return diff, i - start_i def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Dict ): for j in range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ): lowercase_ 
: List[str] = digits[j] + addend if s >= 10: lowercase_ , lowercase_ : Optional[int] = divmod(__SCREAMING_SNAKE_CASE , 10 ) lowercase_ : int = addend // 10 + quotient else: lowercase_ : List[Any] = s lowercase_ : List[str] = addend // 10 if addend == 0: break while addend > 0: lowercase_ , lowercase_ : List[Any] = divmod(__SCREAMING_SNAKE_CASE , 10 ) digits.append(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int = 10**15 ): lowercase_ : Any = [1] lowercase_ : str = 1 lowercase_ : Tuple = 0 while True: lowercase_ , lowercase_ : Optional[int] = next_term(__SCREAMING_SNAKE_CASE , 20 , i + dn , __SCREAMING_SNAKE_CASE ) dn += terms_jumped if dn == n - i: break lowercase_ : int = 0 for j in range(len(__SCREAMING_SNAKE_CASE ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F"{solution() = }")
321
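A naive reference for the sequence the jump-caching solver above accelerates, a(1) = 1 and a(n+1) = a(n) + digit_sum(a(n)); useful for validating small n:

# Direct O(n) computation of the digit-sum recurrence the solver speeds up.
def naive(n: int) -> int:
    a = 1
    for _ in range(n - 1):
        a += sum(int(d) for d in str(a))
    return a

# First terms: 1, 2, 4, 8, 16, 23, 28, 38, 49, 62 ...
assert naive(10) == 62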
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
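A compact version of the integration check above: a forward pass of the tiny public ESM-2 checkpoint (weights download on first run):

# Mirrors the slow test's input ids; the 8M checkpoint has hidden size 320.
import torch
from transformers import EsmModel

model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D").eval()
input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
with torch.no_grad():
    hidden = model(input_ids).last_hidden_state
print(hidden.shape)  # torch.Size([1, 11, 320])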
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional import torch from datasets import load_dataset from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor from torchvision.transforms.functional import InterpolationMode import transformers from transformers import ( HfArgumentParser, Trainer, TrainingArguments, ViTImageProcessor, ViTMAEConfig, ViTMAEForPreTraining, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version __SCREAMING_SNAKE_CASE =logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.31.0") require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt") @dataclass class UpperCamelCase : lowercase = field( default='cifar10' , metadata={'help': 'Name of a dataset from the datasets package'} ) lowercase = field( default=lowercase_ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) lowercase = field( default=lowercase_ , metadata={'help': 'The column name of the images in the files.'} ) lowercase = field(default=lowercase_ , metadata={'help': 'A folder containing the training data.'} ) lowercase = field(default=lowercase_ , metadata={'help': 'A folder containing the validation data.'} ) lowercase = field( default=0.15 , metadata={'help': 'Percent to split off of train for validation.'} ) lowercase = field( default=lowercase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of training examples to this ' 'value if set.' ) } , ) lowercase = field( default=lowercase_ , metadata={ 'help': ( 'For debugging purposes or quicker training, truncate the number of evaluation examples to this ' 'value if set.' ) } , ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = {} if self.train_dir is not None: lowercase_ : Dict = self.train_dir if self.validation_dir is not None: lowercase_ : Optional[Any] = self.validation_dir lowercase_ : Tuple = data_files if data_files else None @dataclass class UpperCamelCase : lowercase = field( default=lowercase_ , metadata={ 'help': ( 'The model checkpoint for weights initialization.Don\'t set if you want to train a model from scratch.' ) } , ) lowercase = field( default=lowercase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name_or_path'} ) lowercase = field( default=lowercase_ , metadata={ 'help': ( 'Override some existing default config settings when a model is trained from scratch. Example: ' 'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index' ) } , ) lowercase = field( default=lowercase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from s3'} ) lowercase = field( default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , ) lowercase = field(default=lowercase_ , metadata={'help': 'Name or path of preprocessor config.'} ) lowercase = field( default=lowercase_ , metadata={ 'help': ( 'Will use the token generated when running `huggingface-cli login` (necessary to use this script ' 'with private models).' 
) } , ) lowercase = field( default=0.75 , metadata={'help': 'The ratio of the number of masked tokens in the input sequence.'} ) lowercase = field( default=lowercase_ , metadata={'help': 'Whether or not to train with normalized pixel values as target.'} ) @dataclass class UpperCamelCase ( lowercase_ ): lowercase = field( default=1e-3 , metadata={'help': 'Base learning rate: absolute_lr = base_lr * total_batch_size / 256.'} ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : List[Any] = torch.stack([example['pixel_values'] for example in examples] ) return {"pixel_values": pixel_values} def lowercase__( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. lowercase_ : Union[str, Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. lowercase_ , lowercase_ , lowercase_ : Dict = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: lowercase_ , lowercase_ , lowercase_ : Optional[Any] = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry('run_mae' , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() lowercase_ : List[str] = training_args.get_process_log_level() logger.setLevel(__SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}''' + F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' ) logger.info(F'''Training/evaluation parameters {training_args}''' ) # Detecting last checkpoint. lowercase_ : int = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: lowercase_ : List[str] = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F'''Output directory ({training_args.output_dir}) already exists and is not empty. ''' 'Use --overwrite_output_dir to overcome.' ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change ''' 'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' ) # Initialize our dataset. 
lowercase_ : Any = load_dataset( data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , ) # If we don't have a validation split, split off a percentage of train as validation. lowercase_ : Any = None if 'validation' in ds.keys() else data_args.train_val_split if isinstance(data_args.train_val_split , __SCREAMING_SNAKE_CASE ) and data_args.train_val_split > 0.0: lowercase_ : Any = ds['train'].train_test_split(data_args.train_val_split ) lowercase_ : Optional[Any] = split['train'] lowercase_ : Union[str, Any] = split['test'] # Load pretrained model and image processor # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. lowercase_ : Any = { 'cache_dir': model_args.cache_dir, 'revision': model_args.model_revision, 'use_auth_token': True if model_args.use_auth_token else None, } if model_args.config_name: lowercase_ : List[Any] = ViTMAEConfig.from_pretrained(model_args.config_name , **__SCREAMING_SNAKE_CASE ) elif model_args.model_name_or_path: lowercase_ : List[str] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **__SCREAMING_SNAKE_CASE ) else: lowercase_ : List[Any] = ViTMAEConfig() logger.warning('You are instantiating a new config instance from scratch.' ) if model_args.config_overrides is not None: logger.info(F'''Overriding config: {model_args.config_overrides}''' ) config.update_from_string(model_args.config_overrides ) logger.info(F'''New config: {config}''' ) # adapt config config.update( { 'mask_ratio': model_args.mask_ratio, 'norm_pix_loss': model_args.norm_pix_loss, } ) # create image processor if model_args.image_processor_name: lowercase_ : List[Any] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **__SCREAMING_SNAKE_CASE ) elif model_args.model_name_or_path: lowercase_ : Tuple = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **__SCREAMING_SNAKE_CASE ) else: lowercase_ : Tuple = ViTImageProcessor() # create model if model_args.model_name_or_path: lowercase_ : List[str] = ViTMAEForPreTraining.from_pretrained( model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) else: logger.info('Training new model from scratch' ) lowercase_ : int = ViTMAEForPreTraining(__SCREAMING_SNAKE_CASE ) if training_args.do_train: lowercase_ : Any = ds['train'].column_names else: lowercase_ : Any = ds['validation'].column_names if data_args.image_column_name is not None: lowercase_ : Optional[int] = data_args.image_column_name elif "image" in column_names: lowercase_ : List[Any] = 'image' elif "img" in column_names: lowercase_ : Dict = 'img' else: lowercase_ : Any = column_names[0] # transformations as done in original MAE paper # source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py if "shortest_edge" in image_processor.size: lowercase_ : List[str] = image_processor.size['shortest_edge'] else: lowercase_ : str = (image_processor.size['height'], image_processor.size['width']) lowercase_ : Optional[Any] = Compose( [ Lambda(lambda __SCREAMING_SNAKE_CASE : img.convert('RGB' ) if img.mode != "RGB" else img ), RandomResizedCrop(__SCREAMING_SNAKE_CASE , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ), 
RandomHorizontalFlip(), ToTensor(), Normalize(mean=image_processor.image_mean , std=image_processor.image_std ), ] ) def preprocess_images(__SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : List[Any] = [transforms(__SCREAMING_SNAKE_CASE ) for image in examples[image_column_name]] return examples if training_args.do_train: if "train" not in ds: raise ValueError('--do_train requires a train dataset' ) if data_args.max_train_samples is not None: lowercase_ : Any = ds['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) # Set the training transforms ds["train"].set_transform(__SCREAMING_SNAKE_CASE ) if training_args.do_eval: if "validation" not in ds: raise ValueError('--do_eval requires a validation dataset' ) if data_args.max_eval_samples is not None: lowercase_ : Dict = ( ds['validation'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms ds["validation"].set_transform(__SCREAMING_SNAKE_CASE ) # Compute absolute learning rate lowercase_ : Optional[Any] = ( training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size ) if training_args.base_learning_rate is not None: lowercase_ : str = training_args.base_learning_rate * total_train_batch_size / 2_56 # Initialize our trainer lowercase_ : Optional[Any] = Trainer( model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=ds['train'] if training_args.do_train else None , eval_dataset=ds['validation'] if training_args.do_eval else None , tokenizer=__SCREAMING_SNAKE_CASE , data_collator=__SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: lowercase_ : str = None if training_args.resume_from_checkpoint is not None: lowercase_ : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: lowercase_ : Optional[int] = last_checkpoint lowercase_ : Dict = trainer.train(resume_from_checkpoint=__SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics('train' , train_result.metrics ) trainer.save_metrics('train' , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: lowercase_ : Tuple = trainer.evaluate() trainer.log_metrics('eval' , __SCREAMING_SNAKE_CASE ) trainer.save_metrics('eval' , __SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub lowercase_ : str = { 'tasks': 'masked-auto-encoding', 'dataset': data_args.dataset_name, 'tags': ['masked-auto-encoding'], } if training_args.push_to_hub: trainer.push_to_hub(**__SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : str ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
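The `base_learning_rate` block above applies the MAE scaling rule absolute_lr = base_lr * total_batch_size / 256. A minimal sketch of the same arithmetic; the batch size, accumulation steps, and world size below are illustrative assumptions, not values from the script:

# Sketch of the absolute learning-rate rule used above.
# All concrete numbers are assumptions for illustration.
base_learning_rate = 1e-3        # matches the CustomTrainingArguments default
train_batch_size = 32            # per-device batch size (assumed)
gradient_accumulation_steps = 2  # assumed
world_size = 4                   # number of processes (assumed)

total_train_batch_size = train_batch_size * gradient_accumulation_steps * world_size  # 256
learning_rate = base_learning_rate * total_train_batch_size / 256                     # 1e-3
print(learning_rate)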
321
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to One dimenssion lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
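The `convolute` method above slides a size_conv x size_conv kernel over a size_data x size_data input with stride conv_step, so each feature map comes out as a square of side (size_data - size_conv) / conv_step + 1. A small worked example of that formula; the sizes are illustrative:

# Worked example of the feature-map size computed in `convolute` above.
size_data, size_conv, conv_step = 28, 5, 1  # illustrative sizes
size_feature_map = int((size_data - size_conv) / conv_step + 1)
print(size_feature_map)  # 24 -> each feature map is 24 x 24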
321
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt" ), }, "tokenizer_file": { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json" ), }, } __SCREAMING_SNAKE_CASE ={ "yjernite/retribert-base-uncased": 512, } __SCREAMING_SNAKE_CASE ={ "yjernite/retribert-base-uncased": {"do_lower_case": True}, } class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = RetriBertTokenizer lowercase = ['input_ids', 'attention_mask'] def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase=True ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> Dict: '''simple docstring''' super().__init__( __UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : str = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars ): lowercase_ : List[str] = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) ) lowercase_ : Tuple = do_lower_case lowercase_ : List[Any] = strip_accents lowercase_ : Dict = tokenize_chinese_chars lowercase_ : Any = normalizer_class(**__UpperCamelCase ) lowercase_ : str = do_lower_case def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> int: '''simple docstring''' lowercase_ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : Dict = [self.sep_token_id] lowercase_ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : str = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase )
321
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
321
1
"""simple docstring""" import inspect import unittest from transformers import MobileViTVaConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel from transformers.models.mobilevitva.modeling_mobilevitva import ( MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST, make_divisible, ) if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class UpperCamelCase ( lowercase_ ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = self.config_class(**self.inputs_dict ) self.parent.assertTrue(hasattr(__UpperCamelCase ,'width_multiplier' ) ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=64 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase="swish" ,__UpperCamelCase=3 ,__UpperCamelCase=32 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=10 ,__UpperCamelCase=None ,__UpperCamelCase=0.25 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,) -> Tuple: '''simple docstring''' lowercase_ : str = parent lowercase_ : Tuple = batch_size lowercase_ : Dict = image_size lowercase_ : Optional[int] = patch_size lowercase_ : int = num_channels lowercase_ : Optional[Any] = make_divisible(512 * width_multiplier ,divisor=8 ) lowercase_ : List[str] = hidden_act lowercase_ : List[Any] = conv_kernel_size lowercase_ : Dict = output_stride lowercase_ : List[Any] = classifier_dropout_prob lowercase_ : List[Any] = use_labels lowercase_ : Union[str, Any] = is_training lowercase_ : List[Any] = num_labels lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = scope lowercase_ : Tuple = width_multiplier lowercase_ : Optional[Any] = ffn_dropout lowercase_ : List[str] = attn_dropout def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Optional[Any] = None lowercase_ : List[Any] = None if self.use_labels: lowercase_ : Any = ids_tensor([self.batch_size] ,self.num_labels ) lowercase_ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels ) lowercase_ : Optional[int] = self.get_config() return config, pixel_values, labels, pixel_labels def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return MobileViTVaConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_act=self.hidden_act ,conv_kernel_size=self.conv_kernel_size ,output_stride=self.output_stride ,classifier_dropout_prob=self.classifier_dropout_prob ,initializer_range=self.initializer_range ,width_multiplier=self.width_multiplier ,ffn_dropout=self.ffn_dropout_prob ,attn_dropout=self.attn_dropout_prob ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Any = MobileViTVaModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : str = model(__UpperCamelCase ) 
self.parent.assertEqual( result.last_hidden_state.shape ,( self.batch_size, self.last_hidden_size, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = self.num_labels lowercase_ : Tuple = MobileViTVaForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : str = model(__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = self.num_labels lowercase_ : List[Any] = MobileViTVaForSemanticSegmentation(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[int] = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) lowercase_ : Any = model(__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual( result.logits.shape ,( self.batch_size, self.num_labels, self.image_size // self.output_stride, self.image_size // self.output_stride, ) ,) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : str = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = config_and_inputs lowercase_ : Tuple = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = ( (MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation) if is_torch_available() else () ) lowercase = ( { 'feature-extraction': MobileViTVaModel, 'image-classification': MobileViTVaForImageClassification, 'image-segmentation': MobileViTVaForSemanticSegmentation, } if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = MobileViTVaModelTester(self ) lowercase_ : Union[str, Any] = MobileViTVaConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='MobileViTV2 does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not support input and output embeddings' ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' pass @unittest.skip(reason='MobileViTV2 does not output attentions' ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='Got `CUDA error: misaligned address` for tests after this one being run.' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Any = model_class(__UpperCamelCase ) lowercase_ : List[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : int = [*signature.parameters.keys()] lowercase_ : str = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : Tuple = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : List[str] = outputs.hidden_states lowercase_ : str = 5 self.assertEqual(len(__UpperCamelCase ) ,__UpperCamelCase ) # MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width) # with the width and height being successively divided by 2. lowercase_ : Dict = 2 for i in range(len(__UpperCamelCase ) ): self.assertListEqual( list(hidden_states[i].shape[-2:] ) ,[self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] ,) divisor *= 2 self.assertEqual(self.model_tester.output_stride ,divisor // 2 ) lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : int = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Union[str, Any] = MobileViTVaModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase__( ): lowercase_ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return ( MobileViTImageProcessor.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[Any] = MobileViTVaForImageClassification.from_pretrained('apple/mobilevitv2-1.0-imagenet1k-256' ).to( __UpperCamelCase ) lowercase_ : List[str] = 
self.default_image_processor lowercase_ : Tuple = prepare_img() lowercase_ : List[str] = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : int = model(**__UpperCamelCase ) # verify the logits lowercase_ : Any = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowercase_ : Dict = model.to(__UpperCamelCase ) lowercase_ : List[str] = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowercase_ : Tuple = prepare_img() lowercase_ : Dict = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : str = model(**__UpperCamelCase ) lowercase_ : Optional[Any] = outputs.logits # verify the logits lowercase_ : Union[str, Any] = torch.Size((1, 21, 32, 32) ) self.assertEqual(logits.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [ [[7.0863, 7.1525, 6.8201], [6.6931, 6.8770, 6.8933], [6.2978, 7.0366, 6.9636]], [[-3.7134, -3.6712, -3.6675], [-3.5825, -3.3549, -3.4777], [-3.3435, -3.3979, -3.2857]], [[-2.9329, -2.8003, -2.7369], [-3.0564, -2.4780, -2.0207], [-2.6889, -1.9298, -1.7640]], ] ,device=__UpperCamelCase ,) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = MobileViTVaForSemanticSegmentation.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowercase_ : Optional[int] = model.to(__UpperCamelCase ) lowercase_ : Dict = MobileViTImageProcessor.from_pretrained('shehan97/mobilevitv2-1.0-voc-deeplabv3' ) lowercase_ : List[str] = prepare_img() lowercase_ : List[Any] = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : str = model(**__UpperCamelCase ) lowercase_ : Optional[Any] = outputs.logits.detach().cpu() lowercase_ : Any = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase ,target_sizes=[(50, 60)] ) lowercase_ : Optional[int] = torch.Size((50, 60) ) self.assertEqual(segmentation[0].shape ,__UpperCamelCase ) lowercase_ : str = image_processor.post_process_semantic_segmentation(outputs=__UpperCamelCase ) lowercase_ : Optional[int] = torch.Size((32, 32) ) self.assertEqual(segmentation[0].shape ,__UpperCamelCase )
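The check_hidden_states_output test above asserts five stages whose spatial size halves at each step, with the final divisor consistent with an output stride of 32. A sketch of that divisor progression using the model tester's default image_size of 64:

# Sketch of the successive halving asserted in check_hidden_states_output above.
image_size = 64  # the model tester's default
divisor = 2
for stage in range(5):
    print(stage, image_size // divisor)  # 32, 16, 8, 4, 2
    divisor *= 2
print(divisor // 2)  # 32 == output_stride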
321
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
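The `__call__` above derives `chunk_length` from `chunk_length_s * sampling_rate` and `chunk_stride` from `(1 - overlap) * chunk_length`, then pads the batch out to `(nb_step - 1) * chunk_stride + chunk_length`. A numeric sketch of that arithmetic; the chunk length, overlap, and audio length below are assumptions:

import numpy as np

# Numeric sketch of the chunk/stride/padding arithmetic used in __call__ above.
sampling_rate = 24000  # the extractor's default
chunk_length_s = 1.0   # assumed
overlap = 0.25         # assumed

chunk_length = int(chunk_length_s * sampling_rate)           # 24000 samples
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))   # 18000 samples
audio_length = 30000                                         # longest example in the batch (assumed)
nb_step = int(np.ceil(audio_length / chunk_stride))          # 2
padded_length = (nb_step - 1) * chunk_stride + chunk_length  # 42000 samples
print(chunk_length, chunk_stride, padded_length)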
321
1
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = feature_size lowercase_ : Optional[Any] = sampling_rate lowercase_ : int = padding_value lowercase_ : Dict = kwargs.pop('padding_side' ,'right' ) lowercase_ : str = kwargs.pop('return_attention_mask' ,__UpperCamelCase ) super().__init__(**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if isinstance(__UpperCamelCase ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ): lowercase_ : List[Any] = { key: [example[key] for example in processed_features] for key in processed_features[0].keys() } # The model's main input name, usually `input_values`, has be passed for padding if self.model_input_names[0] not in processed_features: raise ValueError( 'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`' f''' to this method that includes {self.model_input_names[0]}, but you provided''' f''' {list(processed_features.keys() )}''' ) lowercase_ : int = processed_features[self.model_input_names[0]] lowercase_ : Union[str, Any] = ( return_attention_mask if return_attention_mask is not None else self.return_attention_mask ) if len(__UpperCamelCase ) == 0: if return_attention_mask: lowercase_ : Union[str, Any] = [] return processed_features # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays # and rebuild them afterwards if no return_tensors is specified # Note that we lose the specific device the tensor may be on for PyTorch lowercase_ : Union[str, Any] = required_input[0] if isinstance(__UpperCamelCase ,(list, tuple) ): # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element. lowercase_ : List[Any] = 0 while len(required_input[index] ) == 0: index += 1 if index < len(__UpperCamelCase ): lowercase_ : List[str] = required_input[index][0] if return_tensors is None: if is_tf_tensor(__UpperCamelCase ): lowercase_ : str = 'tf' elif is_torch_tensor(__UpperCamelCase ): lowercase_ : Tuple = 'pt' elif isinstance(__UpperCamelCase ,(int, float, list, tuple, np.ndarray) ): lowercase_ : str = 'np' else: raise ValueError( f'''type of {first_element} unknown: {type(__UpperCamelCase )}. ''' 'Should be one of a python, numpy, pytorch or tensorflow object.' 
) for key, value in processed_features.items(): if isinstance(value[0] ,(int, float) ): lowercase_ : List[str] = to_numpy(__UpperCamelCase ) else: lowercase_ : Optional[Any] = [to_numpy(__UpperCamelCase ) for v in value] # Convert padding_strategy in PaddingStrategy lowercase_ : int = self._get_padding_strategies(padding=__UpperCamelCase ,max_length=__UpperCamelCase ) lowercase_ : List[str] = processed_features[self.model_input_names[0]] lowercase_ : Union[str, Any] = len(__UpperCamelCase ) if not all(len(__UpperCamelCase ) == batch_size for v in processed_features.values() ): raise ValueError('Some items in the output dictionary have a different batch size than others.' ) lowercase_ : Dict = [] for i in range(__UpperCamelCase ): lowercase_ : Any = {k: v[i] for k, v in processed_features.items()} # truncation lowercase_ : List[Any] = self._truncate( __UpperCamelCase ,max_length=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,truncation=__UpperCamelCase ,) truncated_inputs.append(__UpperCamelCase ) if padding_strategy == PaddingStrategy.LONGEST: # make sure that `max_length` cannot be longer than the longest truncated length lowercase_ : Union[str, Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs ) lowercase_ : Dict = PaddingStrategy.MAX_LENGTH lowercase_ : List[str] = {} for i in range(__UpperCamelCase ): # padding lowercase_ : str = self._pad( truncated_inputs[i] ,max_length=__UpperCamelCase ,padding_strategy=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) for key, value in outputs.items(): if key not in batch_outputs: lowercase_ : Optional[int] = [] if value.dtype is np.dtype(np.floataa ): lowercase_ : Optional[int] = value.astype(np.floataa ) batch_outputs[key].append(__UpperCamelCase ) return BatchFeature(__UpperCamelCase ,tensor_type=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = PaddingStrategy.DO_NOT_PAD ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> dict: '''simple docstring''' lowercase_ : Optional[Any] = processed_features[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: lowercase_ : Optional[Any] = len(__UpperCamelCase ) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowercase_ : int = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowercase_ : str = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(__UpperCamelCase ) < max_length if return_attention_mask and "attention_mask" not in processed_features: lowercase_ : str = np.ones(len(__UpperCamelCase ) ,dtype=np.intaa ) if needs_to_be_padded: lowercase_ : Optional[int] = max_length - len(__UpperCamelCase ) if self.padding_side == "right": if return_attention_mask: lowercase_ : Union[str, Any] = np.pad( processed_features['attention_mask'] ,(0, difference) ) lowercase_ : Optional[Any] = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference) lowercase_ : Tuple = np.pad( __UpperCamelCase ,__UpperCamelCase ,'constant' ,constant_values=self.padding_value ) elif self.padding_side == "left": if return_attention_mask: lowercase_ : Optional[Any] = np.pad( processed_features['attention_mask'] ,(difference, 0) ) lowercase_ : List[Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0) lowercase_ : Any = np.pad( __UpperCamelCase ,__UpperCamelCase ,'constant' ,constant_values=self.padding_value ) else: raise ValueError('Invalid 
padding strategy:' + str(self.padding_side ) ) return processed_features def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> int: '''simple docstring''' if not truncation: return processed_features elif truncation and max_length is None: raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' ) lowercase_ : Union[str, Any] = processed_features[self.model_input_names[0]] # find `max_length` that fits `pad_to_multiple_of` if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): lowercase_ : List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of lowercase_ : Any = len(__UpperCamelCase ) > max_length if needs_to_be_truncated: lowercase_ : Any = processed_features[self.model_input_names[0]][:max_length] if "attention_mask" in processed_features: lowercase_ : Union[str, Any] = processed_features['attention_mask'][:max_length] return processed_features def _UpperCAmelCase ( self ,__UpperCamelCase=False ,__UpperCamelCase=None ) -> Dict: '''simple docstring''' if padding is not False: if padding is True: lowercase_ : Optional[Any] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch elif not isinstance(__UpperCamelCase ,__UpperCamelCase ): lowercase_ : Optional[Any] = PaddingStrategy(__UpperCamelCase ) elif isinstance(__UpperCamelCase ,__UpperCamelCase ): lowercase_ : Union[str, Any] = padding else: lowercase_ : Union[str, Any] = PaddingStrategy.DO_NOT_PAD # Set max length if needed if max_length is None: if padding_strategy == PaddingStrategy.MAX_LENGTH: raise ValueError( f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' ) # Test if we have a padding value if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None): raise ValueError( 'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use' ' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' ) return padding_strategy
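`_pad` and `_truncate` above both round `max_length` up to the next multiple of `pad_to_multiple_of` when it is not already one. A worked example of that rounding; the values are illustrative:

# Worked example of the pad_to_multiple_of rounding used above.
max_length, pad_to_multiple_of = 37, 8  # illustrative
if max_length % pad_to_multiple_of != 0:
    max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
print(max_length)  # 40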
321
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
1
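The `_pad` helper in the row above right- or left-pads a feature sequence and its attention mask out to `max_length`. A minimal de-obfuscated sketch of the right-padding branch, assuming 1-D features; `pad_right` and its defaults are my own names, not the library's API:

import numpy as np


def pad_right(features, max_length: int, padding_value: float = 0.0):
    """Right-pad a 1-D feature sequence and build the matching attention mask."""
    attention_mask = np.ones(len(features), dtype=np.int32)
    difference = max_length - len(features)
    if difference <= 0:
        return features, attention_mask
    padded = np.pad(features, (0, difference), "constant", constant_values=padding_value)
    attention_mask = np.pad(attention_mask, (0, difference))  # pads with zeros
    return padded, attention_mask


if __name__ == "__main__":
    feats, mask = pad_right(np.array([0.1, 0.2, 0.3]), max_length=5)
    print(feats)  # [0.1 0.2 0.3 0.  0. ]
    print(mask)   # [1 1 1 0 0]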
"""simple docstring""" import argparse import glob import importlib.util import os import re import black from doc_builder.style_doc import style_docstrings_in_code # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __SCREAMING_SNAKE_CASE ="src/diffusers" __SCREAMING_SNAKE_CASE ="." # This is to make sure the diffusers module imported is the one in the repo. __SCREAMING_SNAKE_CASE =importlib.util.spec_from_file_location( "diffusers", os.path.join(DIFFUSERS_PATH, "__init__.py"), submodule_search_locations=[DIFFUSERS_PATH], ) __SCREAMING_SNAKE_CASE =spec.loader.load_module() def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ): return line.startswith(__SCREAMING_SNAKE_CASE ) or len(__SCREAMING_SNAKE_CASE ) <= 1 or re.search(R'^\s*\)(\s*->.*:|:)\s*$' , __SCREAMING_SNAKE_CASE ) is not None def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : Union[str, Any] = object_name.split('.' ) lowercase_ : List[str] = 0 # First let's find the module where our object lives. lowercase_ : List[Any] = parts[i] while i < len(__SCREAMING_SNAKE_CASE ) and not os.path.isfile(os.path.join(__SCREAMING_SNAKE_CASE , F'''{module}.py''' ) ): i += 1 if i < len(__SCREAMING_SNAKE_CASE ): lowercase_ : Tuple = os.path.join(__SCREAMING_SNAKE_CASE , parts[i] ) if i >= len(__SCREAMING_SNAKE_CASE ): raise ValueError(F'''`object_name` should begin with the name of a module of diffusers but got {object_name}.''' ) with open(os.path.join(__SCREAMING_SNAKE_CASE , F'''{module}.py''' ) , 'r' , encoding='utf-8' , newline='\n' ) as f: lowercase_ : int = f.readlines() # Now let's find the class / func in the code! lowercase_ : Union[str, Any] = '' lowercase_ : Optional[int] = 0 for name in parts[i + 1 :]: while ( line_index < len(__SCREAMING_SNAKE_CASE ) and re.search(RF'''^{indent}(class|def)\s+{name}(\(|\:)''' , lines[line_index] ) is None ): line_index += 1 indent += " " line_index += 1 if line_index >= len(__SCREAMING_SNAKE_CASE ): raise ValueError(F''' {object_name} does not match any function or class in {module}.''' ) # We found the beginning of the class / func, now let's find the end (when the indent diminishes). lowercase_ : Union[str, Any] = line_index while line_index < len(__SCREAMING_SNAKE_CASE ) and _should_continue(lines[line_index] , __SCREAMING_SNAKE_CASE ): line_index += 1 # Clean up empty lines at the end (if any). 
while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowercase_ : Union[str, Any] = lines[start_index:line_index] return "".join(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE =re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)") __SCREAMING_SNAKE_CASE =re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)") __SCREAMING_SNAKE_CASE =re.compile(r"<FILL\s+[^>]*>") def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : Dict = code.split('\n' ) lowercase_ : List[str] = 0 while idx < len(__SCREAMING_SNAKE_CASE ) and len(lines[idx] ) == 0: idx += 1 if idx < len(__SCREAMING_SNAKE_CASE ): return re.search(R'^(\s*)\S' , lines[idx] ).groups()[0] return "" def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : Optional[int] = len(get_indent(__SCREAMING_SNAKE_CASE ) ) > 0 if has_indent: lowercase_ : Dict = F'''class Bla:\n{code}''' lowercase_ : List[Any] = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=1_19 , preview=__SCREAMING_SNAKE_CASE ) lowercase_ : str = black.format_str(__SCREAMING_SNAKE_CASE , mode=__SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : str = style_docstrings_in_code(__SCREAMING_SNAKE_CASE ) return result[len('class Bla:\n' ) :] if has_indent else result def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int=False ): with open(__SCREAMING_SNAKE_CASE , 'r' , encoding='utf-8' , newline='\n' ) as f: lowercase_ : Dict = f.readlines() lowercase_ : Optional[int] = [] lowercase_ : Union[str, Any] = 0 # Not a for loop cause `lines` is going to change (if `overwrite=True`). while line_index < len(__SCREAMING_SNAKE_CASE ): lowercase_ : Optional[int] = _re_copy_warning.search(lines[line_index] ) if search is None: line_index += 1 continue # There is some copied code here, let's retrieve the original. lowercase_ , lowercase_ , lowercase_ : Dict = search.groups() lowercase_ : Tuple = find_code_in_diffusers(__SCREAMING_SNAKE_CASE ) lowercase_ : int = get_indent(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = line_index + 1 if indent == theoretical_indent else line_index + 2 lowercase_ : Tuple = theoretical_indent lowercase_ : Any = start_index # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment. lowercase_ : List[str] = True while line_index < len(__SCREAMING_SNAKE_CASE ) and should_continue: line_index += 1 if line_index >= len(__SCREAMING_SNAKE_CASE ): break lowercase_ : Union[str, Any] = lines[line_index] lowercase_ : Dict = _should_continue(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and re.search(F'''^{indent}# End copy''' , __SCREAMING_SNAKE_CASE ) is None # Clean up empty lines at the end (if any). while len(lines[line_index - 1] ) <= 1: line_index -= 1 lowercase_ : Any = lines[start_index:line_index] lowercase_ : Dict = ''.join(__SCREAMING_SNAKE_CASE ) # Remove any nested `Copied from` comments to avoid circular copies lowercase_ : Dict = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(__SCREAMING_SNAKE_CASE ) is None] lowercase_ : Optional[int] = '\n'.join(__SCREAMING_SNAKE_CASE ) # Before comparing, use the `replace_pattern` on the original code. 
if len(__SCREAMING_SNAKE_CASE ) > 0: lowercase_ : Dict = replace_pattern.replace('with' , '' ).split(',' ) lowercase_ : Tuple = [_re_replace_pattern.search(__SCREAMING_SNAKE_CASE ) for p in patterns] for pattern in patterns: if pattern is None: continue lowercase_ , lowercase_ , lowercase_ : Tuple = pattern.groups() lowercase_ : Optional[int] = re.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if option.strip() == "all-casing": lowercase_ : Union[str, Any] = re.sub(obja.lower() , obja.lower() , __SCREAMING_SNAKE_CASE ) lowercase_ : str = re.sub(obja.upper() , obja.upper() , __SCREAMING_SNAKE_CASE ) # Blackify after replacement. To be able to do that, we need the header (class or function definition) # from the previous line lowercase_ : Optional[int] = blackify(lines[start_index - 1] + theoretical_code ) lowercase_ : Tuple = theoretical_code[len(lines[start_index - 1] ) :] # Test for a diff and act accordingly. if observed_code != theoretical_code: diffs.append([object_name, start_index] ) if overwrite: lowercase_ : Union[str, Any] = lines[:start_index] + [theoretical_code] + lines[line_index:] lowercase_ : Union[str, Any] = start_index + 1 if overwrite and len(__SCREAMING_SNAKE_CASE ) > 0: # Warn the user a file has been modified. print(F'''Detected changes, rewriting {filename}.''' ) with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' , newline='\n' ) as f: f.writelines(__SCREAMING_SNAKE_CASE ) return diffs def lowercase__( __SCREAMING_SNAKE_CASE : bool = False ): lowercase_ : str = glob.glob(os.path.join(__SCREAMING_SNAKE_CASE , '**/*.py' ) , recursive=__SCREAMING_SNAKE_CASE ) lowercase_ : str = [] for filename in all_files: lowercase_ : Any = is_copy_consistent(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) diffs += [F'''- {filename}: copy does not match {d[0]} at line {d[1]}''' for d in new_diffs] if not overwrite and len(__SCREAMING_SNAKE_CASE ) > 0: lowercase_ : Optional[int] = '\n'.join(__SCREAMING_SNAKE_CASE ) raise Exception( 'Found the following copy inconsistencies:\n' + diff + '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.") __SCREAMING_SNAKE_CASE =parser.parse_args() check_copies(args.fix_and_overwrite)
321
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
321
1
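The check_copies script in the row above drives its scan with the `_re_copy_warning` regex. As a rough illustration of its first step — locating `# Copied from` markers before any code comparison — here is a short sketch; the pattern is a simplified variant of the one above, and `find_copy_markers` is a hypothetical helper name:

import re

# Simplified variant of the `_re_copy_warning` pattern used by the script above.
_RE_COPY = re.compile(r"^(\s*)#\s*Copied from\s+(\S+\.\S+)")


def find_copy_markers(source: str):
    """Return (line_number, indent, target) for every `# Copied from` comment."""
    hits = []
    for lineno, line in enumerate(source.splitlines(), start=1):
        match = _RE_COPY.search(line)
        if match:
            indent, target = match.groups()
            hits.append((lineno, indent, target))
    return hits


if __name__ == "__main__":
    demo = "class A:\n    # Copied from diffusers.models.attention.Attention\n    def forward(self):\n        pass\n"
    print(find_copy_markers(demo))  # [(2, '    ', 'diffusers.models.attention.Attention')]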
"""simple docstring""" import argparse import json import os from tensorflow.core.protobuf.saved_model_pba import SavedModel # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_copies.py __SCREAMING_SNAKE_CASE ="." # Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model) __SCREAMING_SNAKE_CASE =[ "Assert", "AssignVariableOp", "EmptyTensorList", "MergeV2Checkpoints", "ReadVariableOp", "ResourceGather", "RestoreV2", "SaveV2", "ShardedFilename", "StatefulPartitionedCall", "StaticRegexFullMatch", "VarHandleOp", ] def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple ): lowercase_ : int = SavedModel() lowercase_ : Union[str, Any] = [] with open(os.path.join(__SCREAMING_SNAKE_CASE , 'utils' , 'tf_ops' , 'onnx.json' ) ) as f: lowercase_ : List[str] = json.load(__SCREAMING_SNAKE_CASE )['opsets'] for i in range(1 , opset + 1 ): onnx_ops.extend(onnx_opsets[str(__SCREAMING_SNAKE_CASE )] ) with open(__SCREAMING_SNAKE_CASE , 'rb' ) as f: saved_model.ParseFromString(f.read() ) lowercase_ : Any = set() # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs) for meta_graph in saved_model.meta_graphs: # Add operations in the graph definition model_op_names.update(node.op for node in meta_graph.graph_def.node ) # Go through the functions in the graph definition for func in meta_graph.graph_def.library.function: # Add operations in each function model_op_names.update(node.op for node in func.node_def ) # Convert to list, sorted if you want lowercase_ : int = sorted(__SCREAMING_SNAKE_CASE ) lowercase_ : str = [] for op in model_op_names: if op not in onnx_ops and op not in INTERNAL_OPS: incompatible_ops.append(__SCREAMING_SNAKE_CASE ) if strict and len(__SCREAMING_SNAKE_CASE ) > 0: raise Exception(F'''Found the following incompatible ops for the opset {opset}:\n''' + incompatible_ops ) elif len(__SCREAMING_SNAKE_CASE ) > 0: print(F'''Found the following incompatible ops for the opset {opset}:''' ) print(*__SCREAMING_SNAKE_CASE , sep='\n' ) else: print(F'''The saved model {saved_model_path} can properly be converted with ONNX.''' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).") parser.add_argument( "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested." ) parser.add_argument( "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model." ) parser.add_argument( "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)" ) __SCREAMING_SNAKE_CASE =parser.parse_args() if args.framework == "onnx": onnx_compliancy(args.saved_model_path, args.strict, args.opset)
321
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
1
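The Baconian-cipher row above hard-codes its 5-letter codes, and that table contains some nonstandard entries (e.g. for 'j' and 'v'). For illustration, a compact 26-letter variant can be generated from letter indices instead — note this is my own variant, giving i/j and u/v distinct codes, so it is not byte-for-byte the table above:

# Each letter maps to its index written as 5 bits over {A, B}: a=AAAAA, b=AAAAB, ..., z=BBAAB.
ENCODE = {chr(ord("a") + i): format(i, "05b").replace("0", "A").replace("1", "B") for i in range(26)}
ENCODE[" "] = " "
DECODE = {code: letter for letter, code in ENCODE.items()}


def encode(message: str) -> str:
    return "".join(ENCODE[ch] for ch in message.lower())


def decode(coded: str) -> str:
    words = []
    for chunk in coded.split():
        words.append("".join(DECODE[chunk[i : i + 5]] for i in range(0, len(chunk), 5)))
    return " ".join(words)


if __name__ == "__main__":
    assert decode(encode("hello world")) == "hello world"
    print(encode("hi"))  # AABBBABAAA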
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import LevitImageProcessor class UpperCamelCase ( unittest.TestCase ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=3 ,__UpperCamelCase=18 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=[0.5, 0.5, 0.5] ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = size if size is not None else {'shortest_edge': 18} lowercase_ : str = crop_size if crop_size is not None else {'height': 18, 'width': 18} lowercase_ : Tuple = parent lowercase_ : List[Any] = batch_size lowercase_ : Any = num_channels lowercase_ : str = image_size lowercase_ : Tuple = min_resolution lowercase_ : List[Any] = max_resolution lowercase_ : List[Any] = do_resize lowercase_ : Optional[Any] = size lowercase_ : str = do_center_crop lowercase_ : str = crop_size lowercase_ : Optional[Any] = do_normalize lowercase_ : str = image_mean lowercase_ : Dict = image_std def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "do_center_crop": self.do_center_crop, "size": self.size, "crop_size": self.crop_size, } @require_torch @require_vision class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = LevitImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[Any] = LevitImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase ,'image_mean' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'image_std' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_normalize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_resize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_center_crop' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'size' ) ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'shortest_edge': 18} ) self.assertEqual(image_processor.crop_size ,{'height': 18, 'width': 18} ) lowercase_ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : int = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase_ : Any = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ) for 
image in image_inputs: self.assertIsInstance(__UpperCamelCase ,Image.Image ) # Test not batched input lowercase_ : List[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : Optional[int] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,np.ndarray ) # Test not batched input lowercase_ : Optional[Any] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : Union[str, Any] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : str = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,torch.Tensor ) # Test not batched input lowercase_ : str = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : Any = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,)
321
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
1
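The last function in the combination-sum row above is the bottom-up form. Written out with descriptive names (mine, not the original's), the recurrence is dp[total] = sum of dp[total - item] over items that fit:

def combination_sum_iv(array: list, target: int) -> int:
    """Count ordered sequences drawn from `array` (repetition allowed) summing to `target`."""
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: pick nothing
    for total in range(1, target + 1):
        for item in array:
            if total - item >= 0:
                dp[total] += dp[total - item]
    return dp[target]


if __name__ == "__main__":
    print(combination_sum_iv([1, 2, 5], 5))  # 9 ordered ways, e.g. 5, 1+2+2, 2+1+2, ...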
"""simple docstring""" from math import factorial __SCREAMING_SNAKE_CASE ={str(d): factorial(d) for d in range(10)} def lowercase__( __SCREAMING_SNAKE_CASE : int ): return sum(DIGIT_FACTORIAL[d] for d in str(__SCREAMING_SNAKE_CASE ) ) def lowercase__( ): lowercase_ : Dict = 7 * factorial(9 ) + 1 return sum(i for i in range(3 , __SCREAMING_SNAKE_CASE ) if sum_of_digit_factorial(__SCREAMING_SNAKE_CASE ) == i ) if __name__ == "__main__": print(F"{solution() = }")
321
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
1
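The disjoint-set row above unions by rank and tracks the largest set size; its `get_parent` caches results back into `parents` (path compression), though the obfuscation hides the assignment target. A de-obfuscated sketch with that intent made explicit — all names here are mine:

class DisjointSet:
    """Union by rank with path compression; tracks the size of the largest set."""

    def __init__(self, set_counts: list) -> None:
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def get_parent(self, node: int) -> int:
        if self.parents[node] == node:
            return node
        self.parents[node] = self.get_parent(self.parents[node])  # path compression
        return self.parents[node]

    def merge(self, src: int, dst: int) -> bool:
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            winner, loser = dst_parent, src_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
        else:
            winner, loser = src_parent, dst_parent
        self.set_counts[winner] += self.set_counts[loser]
        self.set_counts[loser] = 0
        self.parents[loser] = winner
        self.max_set = max(self.max_set, self.set_counts[winner])
        return True


if __name__ == "__main__":
    ds = DisjointSet([1, 1, 1, 1, 1])
    ds.merge(0, 1)
    ds.merge(1, 2)
    print(ds.max_set)  # 3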
"""simple docstring""" from __future__ import annotations import time import numpy as np __SCREAMING_SNAKE_CASE =[8, 5, 9, 7] __SCREAMING_SNAKE_CASE =[ [2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0], ] __SCREAMING_SNAKE_CASE =[ [3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3], ] class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> None: '''simple docstring''' lowercase_ : Tuple = claim_vector lowercase_ : Optional[Any] = allocated_resources_table lowercase_ : str = maximum_claim_table def _UpperCAmelCase ( self ) -> list[int]: '''simple docstring''' return [ sum(p_item[i] for p_item in self.__allocated_resources_table ) for i in range(len(self.__allocated_resources_table[0] ) ) ] def _UpperCAmelCase ( self ) -> list[int]: '''simple docstring''' return np.array(self.__claim_vector ) - np.array( self.__processes_resource_summation() ) def _UpperCAmelCase ( self ) -> list[list[int]]: '''simple docstring''' return [ list(np.array(self.__maximum_claim_table[i] ) - np.array(__UpperCamelCase ) ) for i, allocated_resource in enumerate(self.__allocated_resources_table ) ] def _UpperCAmelCase ( self ) -> dict[int, list[int]]: '''simple docstring''' return {self.__need().index(__UpperCamelCase ): i for i in self.__need()} def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Optional[Any] = self.__need() lowercase_ : List[Any] = self.__allocated_resources_table lowercase_ : List[Any] = self.__available_resources() lowercase_ : int = self.__need_index_manager() for kw, val in kwargs.items(): if kw and val is True: self.__pretty_data() print('_' * 50 + '\n' ) while need_list: lowercase_ : Tuple = False for each_need in need_list: lowercase_ : Any = True for index, need in enumerate(__UpperCamelCase ): if need > available_resources[index]: lowercase_ : Tuple = False break if execution: lowercase_ : Optional[Any] = True # get the original index of the process from ind_ctrl db for original_need_index, need_clone in need_index_manager.items(): if each_need == need_clone: lowercase_ : int = original_need_index print(f'''Process {process_number + 1} is executing.''' ) # remove the process run from stack need_list.remove(__UpperCamelCase ) # update available/freed resources stack lowercase_ : List[str] = np.array(__UpperCamelCase ) + np.array( alloc_resources_table[process_number] ) print( 'Updated available resource stack for processes: ' + ' '.join([str(__UpperCamelCase ) for x in available_resources] ) ) break if safe: print('The process is in a safe state.\n' ) else: print('System in unsafe state. Aborting...\n' ) break def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' print(' ' * 9 + 'Allocated Resource Table' ) for item in self.__allocated_resources_table: print( f'''P{self.__allocated_resources_table.index(__UpperCamelCase ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print(' ' * 9 + 'System Resource Table' ) for item in self.__maximum_claim_table: print( f'''P{self.__maximum_claim_table.index(__UpperCamelCase ) + 1}''' + ' '.join(f'''{it:>8}''' for it in item ) + '\n' ) print( 'Current Usage by Active Processes: ' + ' '.join(str(__UpperCamelCase ) for x in self.__claim_vector ) ) print( 'Initial Available Resources: ' + ' '.join(str(__UpperCamelCase ) for x in self.__available_resources() ) ) time.sleep(1 ) if __name__ == "__main__": import doctest doctest.testmod()
321
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
321
1
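The Banker's-algorithm row above interleaves the safety check with printing. Stripped to the core decision — can every process finish in some order, given `available` resources and per-process `allocation` and `maximum` claims — a minimal sketch; the function name and the toy numbers are mine:

import numpy as np


def is_safe(available, allocation, maximum) -> bool:
    """Banker's safety check: keep running any process whose remaining need fits."""
    need = np.array(maximum) - np.array(allocation)
    work = np.array(available, dtype=int)
    finished = [False] * len(allocation)
    progressed = True
    while progressed:
        progressed = False
        for i, done in enumerate(finished):
            if not done and (need[i] <= work).all():
                work += np.array(allocation[i])  # process i completes and frees its resources
                finished[i] = True
                progressed = True
    return all(finished)


if __name__ == "__main__":
    print(is_safe([3, 3, 2], [[0, 1, 0], [2, 0, 0], [2, 1, 1]], [[2, 2, 2], [3, 2, 2], [4, 3, 3]]))  # True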
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import PoolFormerImageProcessor class UpperCamelCase ( unittest.TestCase ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=3 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=0.9 ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=[0.5, 0.5, 0.5] ,__UpperCamelCase=[0.5, 0.5, 0.5] ,) -> Optional[int]: '''simple docstring''' lowercase_ : List[Any] = size if size is not None else {'shortest_edge': 30} lowercase_ : List[Any] = crop_size if crop_size is not None else {'height': 30, 'width': 30} lowercase_ : Any = parent lowercase_ : List[str] = batch_size lowercase_ : str = num_channels lowercase_ : Any = min_resolution lowercase_ : str = max_resolution lowercase_ : List[str] = do_resize_and_center_crop lowercase_ : Any = size lowercase_ : int = crop_pct lowercase_ : Optional[Any] = crop_size lowercase_ : Tuple = do_normalize lowercase_ : List[Any] = image_mean lowercase_ : List[str] = image_std def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return { "size": self.size, "do_resize_and_center_crop": self.do_resize_and_center_crop, "crop_pct": self.crop_pct, "crop_size": self.crop_size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = PoolFormerImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Any = PoolFormerImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase ,'do_resize_and_center_crop' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'size' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'crop_pct' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_normalize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'image_mean' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'image_std' ) ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'shortest_edge': 30} ) self.assertEqual(image_processor.crop_size ,{'height': 30, 'width': 30} ) lowercase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 ) self.assertEqual(image_processor.size ,{'shortest_edge': 42} ) self.assertEqual(image_processor.crop_size ,{'height': 84, 'width': 84} ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase_ : Optional[int] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ) for image in 
image_inputs: self.assertIsInstance(__UpperCamelCase ,Image.Image ) # Test not batched input lowercase_ : Tuple = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : Optional[Any] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase_ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,numpify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,np.ndarray ) # Test not batched input lowercase_ : List[str] = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : List[Any] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase_ : Dict = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__UpperCamelCase ,torchify=__UpperCamelCase ) for image in image_inputs: self.assertIsInstance(__UpperCamelCase ,torch.Tensor ) # Test not batched input lowercase_ : str = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,) # Test batched lowercase_ : Optional[Any] = image_processing(__UpperCamelCase ,return_tensors='pt' ).pixel_values self.assertEqual( encoded_images.shape ,( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['height'], self.image_processor_tester.crop_size['width'], ) ,)
321
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
1
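Both image-processor test rows above assert that encoded images land exactly at crop_size height/width. The center crop they exercise reduces to a slice; a minimal sketch assuming HWC numpy images (`center_crop` is my name, not the processors' internals):

import numpy as np


def center_crop(image, size: int):
    """Crop the central `size` x `size` window from an HWC image."""
    h, w = image.shape[:2]
    top, left = (h - size) // 2, (w - size) // 2
    return image[top : top + size, left : left + size]


if __name__ == "__main__":
    img = np.zeros((30, 40, 3))
    assert center_crop(img, 18).shape == (18, 18, 3)
    print(center_crop(img, 18).shape)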
"""simple docstring""" import argparse from collections import OrderedDict from pathlib import Path import requests import torch from PIL import Image from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : int = OrderedDict() for key, value in state_dict.items(): if key.startswith('module.encoder' ): lowercase_ : Union[str, Any] = key.replace('module.encoder' , 'glpn.encoder' ) if key.startswith('module.decoder' ): lowercase_ : int = key.replace('module.decoder' , 'decoder.stages' ) if "patch_embed" in key: # replace for example patch_embed1 by patch_embeddings.0 lowercase_ : str = key[key.find('patch_embed' ) + len('patch_embed' )] lowercase_ : Optional[Any] = key.replace(F'''patch_embed{idx}''' , F'''patch_embeddings.{int(__SCREAMING_SNAKE_CASE )-1}''' ) if "norm" in key: lowercase_ : str = key.replace('norm' , 'layer_norm' ) if "glpn.encoder.layer_norm" in key: # replace for example layer_norm1 by layer_norm.0 lowercase_ : Union[str, Any] = key[key.find('glpn.encoder.layer_norm' ) + len('glpn.encoder.layer_norm' )] lowercase_ : str = key.replace(F'''layer_norm{idx}''' , F'''layer_norm.{int(__SCREAMING_SNAKE_CASE )-1}''' ) if "layer_norm1" in key: lowercase_ : Optional[int] = key.replace('layer_norm1' , 'layer_norm_1' ) if "layer_norm2" in key: lowercase_ : str = key.replace('layer_norm2' , 'layer_norm_2' ) if "block" in key: # replace for example block1 by block.0 lowercase_ : str = key[key.find('block' ) + len('block' )] lowercase_ : Optional[Any] = key.replace(F'''block{idx}''' , F'''block.{int(__SCREAMING_SNAKE_CASE )-1}''' ) if "attn.q" in key: lowercase_ : Optional[int] = key.replace('attn.q' , 'attention.self.query' ) if "attn.proj" in key: lowercase_ : Optional[int] = key.replace('attn.proj' , 'attention.output.dense' ) if "attn" in key: lowercase_ : Optional[int] = key.replace('attn' , 'attention.self' ) if "fc1" in key: lowercase_ : str = key.replace('fc1' , 'dense1' ) if "fc2" in key: lowercase_ : Dict = key.replace('fc2' , 'dense2' ) if "linear_pred" in key: lowercase_ : int = key.replace('linear_pred' , 'classifier' ) if "linear_fuse" in key: lowercase_ : str = key.replace('linear_fuse.conv' , 'linear_fuse' ) lowercase_ : Optional[int] = key.replace('linear_fuse.bn' , 'batch_norm' ) if "linear_c" in key: # replace for example linear_c4 by linear_c.3 lowercase_ : Optional[Any] = key[key.find('linear_c' ) + len('linear_c' )] lowercase_ : List[str] = key.replace(F'''linear_c{idx}''' , F'''linear_c.{int(__SCREAMING_SNAKE_CASE )-1}''' ) if "bot_conv" in key: lowercase_ : Union[str, Any] = key.replace('bot_conv' , '0.convolution' ) if "skip_conv1" in key: lowercase_ : Optional[Any] = key.replace('skip_conv1' , '1.convolution' ) if "skip_conv2" in key: lowercase_ : Dict = key.replace('skip_conv2' , '2.convolution' ) if "fusion1" in key: lowercase_ : int = key.replace('fusion1' , '1.fusion' ) if "fusion2" in key: lowercase_ : int = key.replace('fusion2' , '2.fusion' ) if "fusion3" in key: lowercase_ : Dict = key.replace('fusion3' , '3.fusion' ) if "fusion" in key and "conv" in key: lowercase_ : int = key.replace('conv' , 'convolutional_layer' ) if key.startswith('module.last_layer_depth' ): lowercase_ : List[Any] = key.replace('module.last_layer_depth' , 'head.head' ) lowercase_ : List[Any] = value return new_state_dict def lowercase__( __SCREAMING_SNAKE_CASE : Dict , 
__SCREAMING_SNAKE_CASE : List[Any] ): # for each of the encoder blocks: for i in range(config.num_encoder_blocks ): for j in range(config.depths[i] ): # read in weights + bias of keys and values (which is a single matrix in the original implementation) lowercase_ : Optional[Any] = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.weight''' ) lowercase_ : Union[str, Any] = state_dict.pop(F'''glpn.encoder.block.{i}.{j}.attention.self.kv.bias''' ) # next, add keys and values (in that order) to the state dict lowercase_ : List[str] = kv_weight[ : config.hidden_sizes[i], : ] lowercase_ : List[Any] = kv_bias[: config.hidden_sizes[i]] lowercase_ : str = kv_weight[ config.hidden_sizes[i] :, : ] lowercase_ : Tuple = kv_bias[config.hidden_sizes[i] :] def lowercase__( ): lowercase_ : int = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Optional[Any] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return image @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : Any=None ): lowercase_ : Any = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] ) # load image processor (only resize + rescale) lowercase_ : Union[str, Any] = GLPNImageProcessor() # prepare image lowercase_ : Any = prepare_img() lowercase_ : Dict = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values logger.info('Converting model...' ) # load original state dict lowercase_ : Optional[int] = torch.load(__SCREAMING_SNAKE_CASE , map_location=torch.device('cpu' ) ) # rename keys lowercase_ : List[Any] = rename_keys(__SCREAMING_SNAKE_CASE ) # key and value matrices need special treatment read_in_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # create HuggingFace model and load state dict lowercase_ : int = GLPNForDepthEstimation(__SCREAMING_SNAKE_CASE ) model.load_state_dict(__SCREAMING_SNAKE_CASE ) model.eval() # forward pass lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : str = outputs.predicted_depth # verify output if model_name is not None: if "nyu" in model_name: lowercase_ : List[str] = torch.tensor( [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]] ) elif "kitti" in model_name: lowercase_ : List[Any] = torch.tensor( [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]] ) else: raise ValueError(F'''Unknown model name: {model_name}''' ) lowercase_ : Optional[Any] = torch.Size([1, 4_80, 6_40] ) assert predicted_depth.shape == expected_shape assert torch.allclose(predicted_depth[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) print('Looks ok!' ) # finally, push to hub if required if push_to_hub: logger.info('Pushing model and image processor to the hub...' 
) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) parser.add_argument( "--model_name", default="glpn-kitti", type=str, help="Name of the model in case you're pushing to the hub.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
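# For reference, a minimal runnable sketch of the checkpoint-surgery pattern the
# converter above applies. Original GLPN checkpoints use 1-based suffixes
# (patch_embed1, block1, ...) where the HF modules are 0-based
# (patch_embeddings.0, block.0, ...); the kv.weight matrices are additionally
# split row-wise into key.weight (first hidden_size rows) and value.weight
# (remaining rows). The helper name and toy state dict below are illustrative,
# not part of the original script.
from collections import OrderedDict


def rename_patch_embed(state_dict):
    renamed = OrderedDict()
    for key, value in state_dict.items():
        if 'patch_embed' in key:
            # e.g. patch_embed1 -> patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f'patch_embed{idx}', f'patch_embeddings.{int(idx) - 1}')
        renamed[key] = value
    return renamed


print(rename_patch_embed(OrderedDict({'module.patch_embed1.proj.weight': None})))
# OrderedDict([('module.patch_embeddings.0.proj.weight', None)])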
321
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
1
"""simple docstring""" import argparse import os import shutil import torch from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer def lowercase__( __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : str = args.pruning_method lowercase_ : Dict = args.threshold lowercase_ : Tuple = args.model_name_or_path.rstrip('/' ) lowercase_ : Union[str, Any] = args.target_model_path print(F'''Load fine-pruned model from {model_name_or_path}''' ) lowercase_ : Any = torch.load(os.path.join(__SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) lowercase_ : str = {} for name, tensor in model.items(): if "embeddings" in name or "LayerNorm" in name or "pooler" in name: lowercase_ : int = tensor print(F'''Copied layer {name}''' ) elif "classifier" in name or "qa_output" in name: lowercase_ : List[str] = tensor print(F'''Copied layer {name}''' ) elif "bias" in name: lowercase_ : int = tensor print(F'''Copied layer {name}''' ) else: if pruning_method == "magnitude": lowercase_ : List[str] = MagnitudeBinarizer.apply(inputs=__SCREAMING_SNAKE_CASE , threshold=__SCREAMING_SNAKE_CASE ) lowercase_ : str = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "topK": if "mask_scores" in name: continue lowercase_ : Tuple = name[:-6] lowercase_ : Any = model[F'''{prefix_}mask_scores'''] lowercase_ : int = TopKBinarizer.apply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : int = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "sigmoied_threshold": if "mask_scores" in name: continue lowercase_ : List[Any] = name[:-6] lowercase_ : Union[str, Any] = model[F'''{prefix_}mask_scores'''] lowercase_ : List[str] = ThresholdBinarizer.apply(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : int = tensor * mask print(F'''Pruned layer {name}''' ) elif pruning_method == "l0": if "mask_scores" in name: continue lowercase_ : List[str] = name[:-6] lowercase_ : Dict = model[F'''{prefix_}mask_scores'''] lowercase_ , lowercase_ : str = -0.1, 1.1 lowercase_ : Any = torch.sigmoid(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = s * (r - l) + l lowercase_ : Union[str, Any] = s_bar.clamp(min=0.0 , max=1.0 ) lowercase_ : List[Any] = tensor * mask print(F'''Pruned layer {name}''' ) else: raise ValueError('Unknown pruning method' ) if target_model_path is None: lowercase_ : Dict = os.path.join( os.path.dirname(__SCREAMING_SNAKE_CASE ) , F'''bertarized_{os.path.basename(__SCREAMING_SNAKE_CASE )}''' ) if not os.path.isdir(__SCREAMING_SNAKE_CASE ): shutil.copytree(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print(F'''\nCreated folder {target_model_path}''' ) torch.save(__SCREAMING_SNAKE_CASE , os.path.join(__SCREAMING_SNAKE_CASE , 'pytorch_model.bin' ) ) print('\nPruned model saved! See you later!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--pruning_method", choices=["l0", "magnitude", "topK", "sigmoied_threshold"], type=str, required=True, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)" ), ) parser.add_argument( "--threshold", type=float, required=False, help=( "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model." "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared." 
"Not needed for `l0`" ), ) parser.add_argument( "--model_name_or_path", type=str, required=True, help="Folder containing the model that was previously fine-pruned", ) parser.add_argument( "--target_model_path", default=None, type=str, required=False, help="Folder containing the model that was previously fine-pruned", ) __SCREAMING_SNAKE_CASE =parser.parse_args() main(args)
321
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
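# decrease_key above in words: overwrite a node's value with a strictly
# smaller one, then sift the node up until the min-heap property holds
# again. An array-only sketch of the sift (illustrative; the class above
# additionally maintains idx_of_element so it can locate a node's position
# in O(1)):
def sift_up_sketch(heap: list, idx: int) -> None:
    while idx > 0:
        parent = (idx - 1) // 2
        if heap[parent] <= heap[idx]:
            break
        heap[parent], heap[idx] = heap[idx], heap[parent]
        idx = parent


values = [-1, 1, 3, 6, 4]  # the heap the demo above builds; B's value 6 sits at index 3
values[3] = -17            # decrease the key at index 3 (the B -> -17 demo)
sift_up_sketch(values, 3)
print(values)              # [-17, -1, 3, 1, 4]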
321
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available __SCREAMING_SNAKE_CASE ={ "configuration_mobilenet_v2": [ "MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileNetV2Config", "MobileNetV2OnnxConfig", ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =["MobileNetV2FeatureExtractor"] __SCREAMING_SNAKE_CASE =["MobileNetV2ImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileNetV2ForImageClassification", "MobileNetV2ForSemanticSegmentation", "MobileNetV2Model", "MobileNetV2PreTrainedModel", "load_tf_weights_in_mobilenet_v2", ] if TYPE_CHECKING: from .configuration_mobilenet_va import ( MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileNetVaConfig, MobileNetVaOnnxConfig, ) try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_mobilenet_va import MobileNetVaFeatureExtractor from .image_processing_mobilenet_va import MobileNetVaImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mobilenet_va import ( MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel, MobileNetVaPreTrainedModel, load_tf_weights_in_mobilenet_va, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
321
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
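# The contract these tests pin down: a processor is a thin composition that
# routes `text` through the tokenizer and `images` through the image
# processor, merges the resulting dicts, and raises when called with no
# input at all. A minimal sketch of that composition (illustrative;
# CLIPSegProcessor additionally accepts `visual_prompt` and returns its
# pixel values under the `conditional_pixel_values` key, as one of the
# tests above checks):
class ProcessorSketch:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images.')
        encoding = {}
        if text is not None:
            encoding.update(self.tokenizer(text, **kwargs))
        if images is not None:
            encoding.update(self.image_processor(images, **kwargs))
        return encoding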
321
1
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=32 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase=16 ,__UpperCamelCase=[32, 64, 128] ,__UpperCamelCase=[1, 2, 1] ,__UpperCamelCase=[2, 2, 4] ,__UpperCamelCase=2 ,__UpperCamelCase=2.0 ,__UpperCamelCase=True ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.1 ,__UpperCamelCase="gelu" ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-5 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase=10 ,__UpperCamelCase=8 ,__UpperCamelCase=["stage1", "stage2"] ,__UpperCamelCase=[1, 2] ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = parent lowercase_ : Tuple = batch_size lowercase_ : Tuple = image_size lowercase_ : List[Any] = patch_size lowercase_ : Optional[int] = num_channels lowercase_ : str = embed_dim lowercase_ : Any = hidden_sizes lowercase_ : Union[str, Any] = depths lowercase_ : Any = num_heads lowercase_ : Optional[Any] = window_size lowercase_ : int = mlp_ratio lowercase_ : Dict = qkv_bias lowercase_ : int = hidden_dropout_prob lowercase_ : Dict = attention_probs_dropout_prob lowercase_ : Dict = drop_path_rate lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = use_absolute_embeddings lowercase_ : Optional[Any] = patch_norm lowercase_ : int = layer_norm_eps lowercase_ : Union[str, Any] = initializer_range lowercase_ : str = is_training lowercase_ : Union[str, Any] = scope lowercase_ : Optional[Any] = use_labels lowercase_ : List[Any] = type_sequence_label_size lowercase_ : Any = encoder_stride lowercase_ : Dict = out_features lowercase_ : List[Any] = out_indices def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Any = None if self.use_labels: lowercase_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : Dict = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return FocalNetConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act 
,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Dict = FocalNetModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ) lowercase_ : int = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) lowercase_ : List[Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Union[str, Any] = FocalNetBackbone(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size, 8, 8] ) # verify channels self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) ) self.parent.assertListEqual(model.channels ,config.hidden_sizes[:-1] ) # verify backbone works with out_features=None lowercase_ : str = None lowercase_ : int = FocalNetBackbone(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[str] = model(__UpperCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) ,1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.image_size * 2, 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) ,1 ) self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = FocalNetForMaskedImageModeling(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ) self.parent.assertEqual( result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images lowercase_ : Union[str, Any] = 1 lowercase_ : int = FocalNetForMaskedImageModeling(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Optional[int] = self.type_sequence_label_size lowercase_ : str = FocalNetForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) # test greyscale images lowercase_ : List[str] = 1 lowercase_ : int = FocalNetForImageClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[int] = floats_tensor([self.batch_size, 1, 
self.image_size, self.image_size] ) lowercase_ : Any = model(__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : int = config_and_inputs lowercase_ : Optional[int] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) lowercase = ( {'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : str = FocalNetModelTester(self ) lowercase_ : Dict = ConfigTester(self ,config_class=__UpperCamelCase ,embed_dim=37 ,has_text_modality=__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase ) @unittest.skip(reason='FocalNet does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip(reason='FocalNet does not use feedforward chunking' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowercase_ : str = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase_ : str = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase ,nn.Linear ) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: lowercase_ : Union[str, Any] = model_class(__UpperCamelCase ) lowercase_ : Tuple = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Optional[int] = [*signature.parameters.keys()] lowercase_ : int = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : int = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : Optional[int] = outputs.hidden_states lowercase_ : Tuple = getattr( self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__UpperCamelCase ) ,__UpperCamelCase ) # FocalNet has a different seq_length lowercase_ : Tuple = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase_ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) lowercase_ : List[Any] = outputs.reshaped_hidden_states self.assertEqual(len(__UpperCamelCase ) ,__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = reshaped_hidden_states[0].shape lowercase_ : int = ( reshaped_hidden_states[0].view(__UpperCamelCase ,__UpperCamelCase ,height * width ).permute(0 ,2 ,1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ : Any = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Tuple = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: lowercase_ : int = True self.check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : str = True self.check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Optional[int] = 3 lowercase_ : Optional[int] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size ,collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) lowercase_ : str = ( config.patch_size if isinstance(config.patch_size ,collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) lowercase_ : int = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) lowercase_ : str = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: lowercase_ : Any = True self.check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,(padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : List[str] = True self.check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,(padded_height, padded_width) ) 
@slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Union[str, Any] = FocalNetModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : str = _config_zero_init(__UpperCamelCase ) for model_class in self.all_model_classes: lowercase_ : List[Any] = model_class(config=__UpperCamelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @require_vision @require_torch class UpperCamelCase ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return AutoImageProcessor.from_pretrained('microsoft/focalnet-tiny' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[Any] = FocalNetForImageClassification.from_pretrained('microsoft/focalnet-tiny' ).to(__UpperCamelCase ) lowercase_ : Optional[Any] = self.default_image_processor lowercase_ : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) lowercase_ : Any = image_processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : Optional[Any] = model(**__UpperCamelCase ) # verify the logits lowercase_ : List[str] = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : List[Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) ) self.assertTrue(outputs.logits.argmax(dim=-1 ).item() ,281 ) @require_torch class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = (FocalNetBackbone,) if is_torch_available() else () lowercase = FocalNetConfig lowercase = False def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[Any] = FocalNetModelTester(self )
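# The shape assertions above all reduce to one identity: an (H, W) image cut
# into (p, p) patches yields (H // p) * (W // p) tokens, after which each of
# the len(depths) - 1 downsampling stages quarters the token count and
# doubles the channel dim. Worked numbers for the tester defaults
# (image_size=32, patch_size=2, embed_dim=16, depths=[1, 2, 1]):
num_patches = (32 // 2) * (32 // 2)            # 256 tokens after patch embedding
final_seq_len = num_patches // 4 ** (3 - 1)    # 16 tokens at the last stage
final_dim = 16 * 2 ** (3 - 1)                  # 64 channels at the last stage
print(num_patches, final_seq_len, final_dim)   # 256 16 64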
321
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
1
"""simple docstring""" import qiskit def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Union[str, Any] = qiskit.Aer.get_backend('aer_simulator' ) # Create a Quantum Circuit acting on the q register lowercase_ : Tuple = qiskit.QuantumCircuit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Apply X (NOT) Gate to Qubits 0 & 1 circuit.x(0 ) circuit.x(1 ) # Map the quantum measurement to the classical bits circuit.measure([0, 1] , [0, 1] ) # Execute the circuit on the qasm simulator lowercase_ : int = qiskit.execute(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , shots=10_00 ) # Return the histogram data of the results of the experiment. return job.result().get_counts(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =single_qubit_measure(2, 2) print(F"Total count for various states are: {counts}")
321
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
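# The past_key_values test above checks the standard KV-cache invariant:
# decoding the extended sequence from scratch and decoding only the new
# tokens against the cached keys/values must agree on the new positions.
# Schematically (eliding the encoder states and masks passed above):
#
#   full   = model(cat([ids, next_tokens]), ...)['hidden_states'][0]
#   cached = model(next_tokens, past_key_values=past, ...)['hidden_states'][0]
#   torch.allclose(full[:, -3:, idx], cached[:, :, idx], atol=1e-3)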
321
1
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
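# The dynamic-axes assertions above encode the export convention this module
# relies on: every tokenizer input and every sequence-shaped output is
# dynamic over batch and sequence, while pooled outputs vary only over
# batch. A sketch of building such a mapping (an illustrative helper, not
# the transformers.convert_graph_to_onnx API):
def dynamic_axes_sketch(input_names, seq_outputs, pooled_outputs):
    axes = {name: {0: 'batch', 1: 'sequence'} for name in list(input_names) + list(seq_outputs)}
    axes.update({name: {0: 'batch'} for name in pooled_outputs})
    return axes


print(dynamic_axes_sketch(['input_ids'], ['output_0'], ['output_1']))
# {'input_ids': {0: 'batch', 1: 'sequence'}, 'output_0': {0: 'batch', 1: 'sequence'}, 'output_1': {0: 'batch'}}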
321
1
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int = 10_00 ): lowercase_ : Optional[Any] = 2**power lowercase_ : Dict = str(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = list(__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = 0 for i in list_num: sum_of_num += int(__SCREAMING_SNAKE_CASE ) return sum_of_num if __name__ == "__main__": __SCREAMING_SNAKE_CASE =int(input("Enter the power of 2: ").strip()) print("2 ^ ", power, " = ", 2**power) __SCREAMING_SNAKE_CASE =solution(power) print("Sum of the digits is: ", result)
321
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
321
1
"""simple docstring""" import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=10 ,__UpperCamelCase=3 ,__UpperCamelCase=2 ,__UpperCamelCase=2 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=10 ,__UpperCamelCase=0.02 ,__UpperCamelCase="divided_space_time" ,__UpperCamelCase=None ,) -> str: '''simple docstring''' lowercase_ : Union[str, Any] = parent lowercase_ : str = batch_size lowercase_ : List[str] = image_size lowercase_ : Any = num_channels lowercase_ : int = patch_size lowercase_ : List[str] = num_frames lowercase_ : Union[str, Any] = is_training lowercase_ : List[str] = use_labels lowercase_ : List[Any] = hidden_size lowercase_ : Union[str, Any] = num_hidden_layers lowercase_ : List[Any] = num_attention_heads lowercase_ : Union[str, Any] = intermediate_size lowercase_ : Tuple = hidden_act lowercase_ : List[str] = hidden_dropout_prob lowercase_ : int = attention_probs_dropout_prob lowercase_ : Union[str, Any] = attention_type lowercase_ : str = initializer_range lowercase_ : Dict = scope lowercase_ : Tuple = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token lowercase_ : Optional[Any] = (image_size // patch_size) ** 2 lowercase_ : int = (num_frames) * self.num_patches_per_frame + 1 def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Optional[Any] = None if self.use_labels: lowercase_ : int = ids_tensor([self.batch_size] ,self.num_labels ) lowercase_ : str = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Tuple = TimesformerConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,num_frames=self.num_frames ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,attention_type=self.attention_type ,) lowercase_ : int = self.num_labels return config def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: '''simple 
docstring''' lowercase_ : Any = TimesformerModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = TimesformerForVideoClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Any = model(__UpperCamelCase ) # verify the logits shape lowercase_ : Optional[int] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[Any] = self.prepare_config_and_inputs() lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = config_and_inputs lowercase_ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () lowercase = ( {'feature-extraction': TimesformerModel, 'video-classification': TimesformerForVideoClassification} if is_torch_available() else {} ) lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = TimesformerModelTester(self ) lowercase_ : Optional[int] = ConfigTester( self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ) -> Dict: '''simple docstring''' lowercase_ : int = copy.deepcopy(__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): lowercase_ : List[str] = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=__UpperCamelCase ) return inputs_dict def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='TimeSformer does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[Any] = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) ) lowercase_ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase ,nn.Linear ) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Optional[int] = model_class(__UpperCamelCase ) lowercase_ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Dict = [*signature.parameters.keys()] lowercase_ : str = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Optional[int] = TimesformerModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if not self.has_attentions: pass else: lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : str = True for model_class in self.all_model_classes: lowercase_ : Any = self.model_tester.seq_length lowercase_ : int = self.model_tester.num_frames lowercase_ : Optional[Any] = True lowercase_ : Dict = False lowercase_ : Tuple = True lowercase_ : Dict = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : Optional[int] = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : Tuple = outputs.attentions self.assertEqual(len(__UpperCamelCase ) ,self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase_ : Union[str, Any] = True lowercase_ : Optional[Any] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : Optional[Any] = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : str = outputs.attentions self.assertEqual(len(__UpperCamelCase ) ,self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) # Check attention is always last and order is fine lowercase_ : Optional[Any] = True lowercase_ : Union[str, Any] = True lowercase_ : List[str] = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : int = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) self.assertEqual(out_len + 1 ,len(__UpperCamelCase ) ) lowercase_ : List[Any] = outputs.attentions self.assertEqual(len(__UpperCamelCase ) ,self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] ,) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : int = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : Dict = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : Tuple = outputs.hidden_states lowercase_ : List[Any] = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(__UpperCamelCase ) ,__UpperCamelCase ) lowercase_ : List[str] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,) lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: lowercase_ : Optional[int] = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Dict = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def lowercase__( ): lowercase_ : str = hf_hub_download( repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' ) lowercase_ : List[Any] = np.load(__SCREAMING_SNAKE_CASE ) return list(__SCREAMING_SNAKE_CASE ) @require_torch @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] ,image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : str = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to( __UpperCamelCase ) lowercase_ : int = self.default_image_processor lowercase_ : List[Any] = prepare_video() lowercase_ : str = image_processor(video[:8] ,return_tensors='pt' ).to(__UpperCamelCase ) # forward pass with torch.no_grad(): lowercase_ : List[Any] = model(**__UpperCamelCase ) # verify the logits lowercase_ : List[str] = torch.Size((1, 400) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : Any = torch.tensor([-0.3016, -0.7713, -0.4205] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
321
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "sail/poolformer_s12": "https://huggingface.co/sail/poolformer_s12/resolve/main/config.json", # See all PoolFormer models at https://huggingface.co/models?filter=poolformer } class UpperCamelCase ( lowercase_ ): lowercase = 'poolformer' def __init__( self ,__UpperCamelCase=3 ,__UpperCamelCase=16 ,__UpperCamelCase=16 ,__UpperCamelCase=3 ,__UpperCamelCase=4.0 ,__UpperCamelCase=[2, 2, 6, 2] ,__UpperCamelCase=[64, 128, 320, 512] ,__UpperCamelCase=[7, 3, 3, 3] ,__UpperCamelCase=[4, 2, 2, 2] ,__UpperCamelCase=[2, 1, 1, 1] ,__UpperCamelCase=4 ,__UpperCamelCase=0.0 ,__UpperCamelCase="gelu" ,__UpperCamelCase=True ,__UpperCamelCase=1e-5 ,__UpperCamelCase=0.02 ,**__UpperCamelCase ,) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = num_channels lowercase_ : Dict = patch_size lowercase_ : Union[str, Any] = stride lowercase_ : List[Any] = padding lowercase_ : List[str] = pool_size lowercase_ : Any = hidden_sizes lowercase_ : str = mlp_ratio lowercase_ : str = depths lowercase_ : List[str] = patch_sizes lowercase_ : Tuple = strides lowercase_ : str = num_encoder_blocks lowercase_ : Optional[Any] = drop_path_rate lowercase_ : List[Any] = hidden_act lowercase_ : Any = use_layer_scale lowercase_ : Tuple = layer_scale_init_value lowercase_ : int = initializer_range super().__init__(**__UpperCamelCase ) class UpperCamelCase ( lowercase_ ): lowercase = version.parse('1.11' ) @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def _UpperCAmelCase ( self ) -> float: '''simple docstring''' return 2e-3
321
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "funnel-transformer/small": "https://huggingface.co/funnel-transformer/small/resolve/main/config.json", "funnel-transformer/small-base": "https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json", "funnel-transformer/medium": "https://huggingface.co/funnel-transformer/medium/resolve/main/config.json", "funnel-transformer/medium-base": "https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json", "funnel-transformer/intermediate": ( "https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json" ), "funnel-transformer/intermediate-base": ( "https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json" ), "funnel-transformer/large": "https://huggingface.co/funnel-transformer/large/resolve/main/config.json", "funnel-transformer/large-base": "https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json", "funnel-transformer/xlarge": "https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json", "funnel-transformer/xlarge-base": "https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json", } class UpperCamelCase ( lowercase_ ): lowercase = 'funnel' lowercase = { 'hidden_size': 'd_model', 'num_attention_heads': 'n_head', } def __init__( self ,__UpperCamelCase=3_0522 ,__UpperCamelCase=[4, 4, 4] ,__UpperCamelCase=None ,__UpperCamelCase=2 ,__UpperCamelCase=768 ,__UpperCamelCase=12 ,__UpperCamelCase=64 ,__UpperCamelCase=3072 ,__UpperCamelCase="gelu_new" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.1 ,__UpperCamelCase=None ,__UpperCamelCase=1e-9 ,__UpperCamelCase="mean" ,__UpperCamelCase="relative_shift" ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = vocab_size lowercase_ : Dict = block_sizes lowercase_ : int = [1] * len(__UpperCamelCase ) if block_repeats is None else block_repeats assert len(__UpperCamelCase ) == len( self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length." 
lowercase_ : List[str] = num_decoder_layers lowercase_ : Union[str, Any] = d_model lowercase_ : Optional[Any] = n_head lowercase_ : str = d_head lowercase_ : int = d_inner lowercase_ : Any = hidden_act lowercase_ : Tuple = hidden_dropout lowercase_ : Union[str, Any] = attention_dropout lowercase_ : List[Any] = activation_dropout lowercase_ : Optional[int] = initializer_range lowercase_ : Optional[int] = initializer_std lowercase_ : Dict = layer_norm_eps assert pooling_type in [ "mean", "max", ], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.''' lowercase_ : int = pooling_type assert attention_type in [ "relative_shift", "factorized", ], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.''' lowercase_ : int = attention_type lowercase_ : Dict = separate_cls lowercase_ : List[str] = truncate_seq lowercase_ : List[str] = pool_q_only super().__init__(**__UpperCamelCase ) @property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return sum(self.block_sizes ) @num_hidden_layers.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' raise NotImplementedError( 'This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`.' ) @property def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return len(self.block_sizes ) @num_blocks.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' raise NotImplementedError('This model does not support the setting of `num_blocks`. Please set `block_sizes`.' )
321
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
1
"""simple docstring""" import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_features'] def __init__( self ,__UpperCamelCase=80 ,__UpperCamelCase=1_6000 ,__UpperCamelCase=160 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=0.0 ,__UpperCamelCase=False ,**__UpperCamelCase ,) -> Tuple: '''simple docstring''' super().__init__( feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[Any] = n_fft lowercase_ : Any = hop_length lowercase_ : Optional[Any] = chunk_length lowercase_ : str = chunk_length * sampling_rate lowercase_ : str = self.n_samples // hop_length lowercase_ : Tuple = sampling_rate lowercase_ : str = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 ,num_mel_filters=__UpperCamelCase ,min_frequency=0.0 ,max_frequency=8000.0 ,sampling_rate=__UpperCamelCase ,norm='slaney' ,mel_scale='slaney' ,) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> np.ndarray: '''simple docstring''' lowercase_ : str = spectrogram( __UpperCamelCase ,window_function(self.n_fft ,'hann' ) ,frame_length=self.n_fft ,hop_length=self.hop_length ,power=2.0 ,mel_filters=self.mel_filters ,log_mel='log10' ,) lowercase_ : Any = log_spec[:, :-1] lowercase_ : int = np.maximum(__UpperCamelCase ,log_spec.max() - 8.0 ) lowercase_ : Any = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def _UpperCAmelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = 0.0 ) -> List[np.ndarray]: '''simple docstring''' if attention_mask is not None: lowercase_ : List[str] = np.array(__UpperCamelCase ,np.intaa ) lowercase_ : List[Any] = [] for vector, length in zip(__UpperCamelCase ,attention_mask.sum(-1 ) ): lowercase_ : Dict = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7 ) if length < normed_slice.shape[0]: lowercase_ : Dict = padding_value normed_input_values.append(__UpperCamelCase ) else: lowercase_ : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1e-7 ) for x in input_values] return normed_input_values def __call__( self ,__UpperCamelCase ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = "max_length" ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' 
) lowercase_ : List[str] = isinstance(__UpperCamelCase ,np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' ) lowercase_ : Optional[int] = is_batched_numpy or ( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : Optional[int] = [np.asarray([speech] ,dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Union[str, Any] = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase_ : Optional[int] = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Union[str, Any] = [np.asarray([raw_speech] ).T] lowercase_ : Tuple = BatchFeature({'input_features': raw_speech} ) # convert into correct format for padding lowercase_ : Any = self.pad( __UpperCamelCase ,padding=__UpperCamelCase ,max_length=max_length if max_length else self.n_samples ,truncation=__UpperCamelCase ,pad_to_multiple_of=__UpperCamelCase ,return_attention_mask=return_attention_mask or do_normalize ,) # zero-mean and unit-variance normalization if do_normalize: lowercase_ : Any = self.zero_mean_unit_var_norm( padded_inputs['input_features'] ,attention_mask=padded_inputs['attention_mask'] ,padding_value=self.padding_value ,) lowercase_ : str = np.stack(padded_inputs['input_features'] ,axis=0 ) # make sure list is in array format lowercase_ : Union[str, Any] = padded_inputs.get('input_features' ).transpose(2 ,0 ,1 ) lowercase_ : Optional[int] = [self._np_extract_fbank_features(__UpperCamelCase ) for waveform in input_features[0]] if isinstance(input_features[0] ,__UpperCamelCase ): lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ) for feature in input_features] else: lowercase_ : Tuple = input_features if return_attention_mask: # rescale from sample (48000) to feature (3000) lowercase_ : Union[str, Any] = padded_inputs['attention_mask'][:, :: self.hop_length] if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs def _UpperCAmelCase ( self ) -> Dict[str, Any]: '''simple docstring''' lowercase_ : List[str] = copy.deepcopy(self.__dict__ ) lowercase_ : Union[str, Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
321
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
1
"""simple docstring""" import cmath import math def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): lowercase_ : str = math.radians(__SCREAMING_SNAKE_CASE ) lowercase_ : int = math.radians(__SCREAMING_SNAKE_CASE ) # Convert voltage and current to rectangular form lowercase_ : Optional[int] = cmath.rect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = cmath.rect(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Calculate apparent power return voltage_rect * current_rect if __name__ == "__main__": import doctest doctest.testmod()
321
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and saved as list of matrix lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to One dimenssion lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Leaning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum error of all single image lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complished---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
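# --- A minimal standalone sketch (not part of the class above) of non-overlapping
# average pooling, the downsampling step used between the convolution and the
# fully connected layers above: the feature map is divided into size x size
# blocks and each block is reduced to its mean. The (4, 4) map and pool size 2
# are illustrative assumptions.
import numpy as np

def average_pool(feature_map: np.ndarray, size: int) -> np.ndarray:
    h, w = feature_map.shape
    # reshape into (h//size, size, w//size, size) blocks, then average each block
    return feature_map.reshape(h // size, size, w // size, size).mean(axis=(1, 3))

if __name__ == "__main__":
    fmap = np.arange(16, dtype=float).reshape(4, 4)
    print(average_pool(fmap, 2))  # 2x2 map of per-block means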
321
1
"""simple docstring""" import os def lowercase__( ): lowercase_ : int = os.path.join(os.path.dirname(__SCREAMING_SNAKE_CASE ) , 'num.txt' ) with open(__SCREAMING_SNAKE_CASE ) as file_hand: return str(sum(int(__SCREAMING_SNAKE_CASE ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
321
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
321
1
"""simple docstring""" # Algorithm for the pigeonhole sorting def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = min(__SCREAMING_SNAKE_CASE ) # min() finds the minimum value lowercase_ : List[str] = max(__SCREAMING_SNAKE_CASE ) # max() finds the maximum value lowercase_ : Union[str, Any] = max_val - min_val + 1 # size is difference of max and min values plus one # list of pigeonholes of size equal to the variable size lowercase_ : Optional[int] = [0] * size # Populate the pigeonholes. for x in a: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "integers only please" holes[x - min_val] += 1 # Putting the elements back into the array in an order. lowercase_ : Optional[Any] = 0 for count in range(__SCREAMING_SNAKE_CASE ): while holes[count] > 0: holes[count] -= 1 lowercase_ : Union[str, Any] = count + min_val i += 1 def lowercase__( ): lowercase_ : Optional[Any] = [8, 3, 2, 7, 4, 6, 8] pigeonhole_sort(__SCREAMING_SNAKE_CASE ) print('Sorted order is:' , ' '.join(__SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": main()
321
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
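# --- A hedged usage sketch for the feature extractor defined above. It assumes
# the class is exported as EncodecFeatureExtractor (the name here is the
# obfuscated UpperCamelCase) and uses the defaults visible in __init__:
# mono audio (feature_size=1) at 24 kHz. The 'input_values' key matches the
# model_input_names declared above.
import numpy as np
from transformers import EncodecFeatureExtractor  # assumed export of the class above

extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24_000)
audio = np.zeros(24_000, dtype=np.float32)  # one second of mono silence
features = extractor(audio, sampling_rate=24_000, return_tensors="np")
print(features["input_values"].shape)  # (batch, channels, padded_length)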
321
1
"""simple docstring""" import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase : lowercase = None @experimental def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ): if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return _map_with_joblib(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int ): lowercase_ : List[str] = num_proc if num_proc <= len(__SCREAMING_SNAKE_CASE ) else len(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = [] # We organize the splits ourselve (contiguous splits) for index in range(__SCREAMING_SNAKE_CASE ): lowercase_ : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) // num_proc lowercase_ : Any = len(__SCREAMING_SNAKE_CASE ) % num_proc lowercase_ : Union[str, Any] = div * index + min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(__SCREAMING_SNAKE_CASE ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( F'''Error dividing inputs iterable among processes. 
''' F'''Total number of objects {len(__SCREAMING_SNAKE_CASE )}, ''' F'''length: {sum(len(i[1] ) for i in split_kwds )}''' ) logger.info( F'''Spawning {num_proc} processes for {len(__SCREAMING_SNAKE_CASE )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' ) lowercase_ , lowercase_ : List[str] = None, None if not disable_tqdm: lowercase_ , lowercase_ : List[Any] = (RLock(),), tqdm.set_lock with Pool(__SCREAMING_SNAKE_CASE , initargs=__SCREAMING_SNAKE_CASE , initializer=__SCREAMING_SNAKE_CASE ) as pool: lowercase_ : Tuple = pool.map(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) logger.info(F'''Finished {num_proc} processes''' ) lowercase_ : Optional[int] = [obj for proc_res in mapped for obj in proc_res] logger.info(F'''Unpacked {len(__SCREAMING_SNAKE_CASE )} objects''' ) return mapped def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ): # progress bar is not yet supported for _map_with_joblib, because tqdm couldn't accurately be applied to joblib, # and it requires monkey-patching joblib internal classes which is subject to change import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=__SCREAMING_SNAKE_CASE ): return joblib.Parallel()( joblib.delayed(__SCREAMING_SNAKE_CASE )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Any = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: lowercase_ : List[Any] = None
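# --- A standalone sketch of the contiguous-split arithmetic used above: each of
# num_proc workers gets len(iterable) // num_proc items, the first
# len(iterable) % num_proc workers get one extra, and the slices stay
# contiguous and cover the input exactly once.
def contiguous_slices(n_items: int, num_proc: int):
    div, mod = divmod(n_items, num_proc)
    for index in range(num_proc):
        start = div * index + min(index, mod)
        end = start + div + (1 if index < mod else 0)
        yield start, end

print(list(contiguous_slices(10, 3)))  # [(0, 4), (4, 7), (7, 10)]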
321
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
1
"""simple docstring""" from itertools import zip_longest import requests from bsa import BeautifulSoup from pandas import DataFrame def lowercase__( __SCREAMING_SNAKE_CASE : str = "laptop" ): lowercase_ : int = F'''https://www.amazon.in/laptop/s?k={product}''' lowercase_ : str = { 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36\n (KHTML, like Gecko)Chrome/44.0.2403.157 Safari/537.36', 'Accept-Language': 'en-US, en;q=0.5', } lowercase_ : str = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE , headers=__SCREAMING_SNAKE_CASE ).text ) # Initialize a Pandas dataframe with the column titles lowercase_ : List[str] = DataFrame( columns=[ 'Product Title', 'Product Link', 'Current Price of the product', 'Product Rating', 'MRP of the product', 'Discount', ] ) # Loop through each entry and store them in the dataframe for item, _ in zip_longest( soup.find_all( 'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ): try: lowercase_ : Dict = item.ha.text lowercase_ : Union[str, Any] = 'https://www.amazon.in/' + item.ha.a['href'] lowercase_ : Optional[int] = item.find('span' , attrs={'class': 'a-offscreen'} ).text try: lowercase_ : Tuple = item.find('span' , attrs={'class': 'a-icon-alt'} ).text except AttributeError: lowercase_ : Dict = 'Not available' try: lowercase_ : Dict = ( '₹' + item.find( 'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1] ) except AttributeError: lowercase_ : Optional[int] = '' try: lowercase_ : Dict = float( ( ( float(product_mrp.strip('₹' ).replace(',' , '' ) ) - float(product_price.strip('₹' ).replace(',' , '' ) ) ) / float(product_mrp.strip('₹' ).replace(',' , '' ) ) ) * 1_00 ) except ValueError: lowercase_ : Any = float('nan' ) except AttributeError: pass lowercase_ : Optional[int] = [ product_title, product_link, product_price, product_rating, product_mrp, discount, ] lowercase_ : List[Any] = ' ' lowercase_ : Tuple = ' ' data_frame.index += 1 return data_frame if __name__ == "__main__": __SCREAMING_SNAKE_CASE ="headphones" get_amazon_product_data(product).to_csv(F"Amazon Product Data for {product}.csv")
321
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
1
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = XLMTokenizer lowercase = False def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : Dict = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'w</w>', 'r</w>', 't</w>', 'lo', 'low', 'er</w>', 'low</w>', 'lowest</w>', 'newer</w>', 'wider</w>', '<unk>', ] lowercase_ : Dict = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['l o 123', 'lo w 1456', 'e r</w> 1789', ''] lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ) as fp: fp.write(json.dumps(__UpperCamelCase ) ) with open(self.merges_file ,'w' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = 'lower newer' lowercase_ : Any = 'lower newer' return input_text, output_text def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = XLMTokenizer(self.vocab_file ,self.merges_file ) lowercase_ : str = 'lower' lowercase_ : Union[str, Any] = ['low', 'er</w>'] lowercase_ : List[str] = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Tuple = tokens + ['<unk>'] lowercase_ : str = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' ) lowercase_ : Union[str, Any] = tokenizer.encode('sequence builders' ,add_special_tokens=__UpperCamelCase ) lowercase_ : Tuple = tokenizer.encode('multi-sequence build' ,add_special_tokens=__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ) lowercase_ : str = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ,__UpperCamelCase ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
321
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
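# --- A standalone illustration of the q/k/v splitting performed in
# read_in_q_k_v above: a fused qkv projection of shape (3 * hidden, hidden) is
# sliced row-wise into three (hidden, hidden) matrices. hidden = 4 is an
# illustrative assumption.
import torch

hidden = 4
in_proj_weight = torch.randn(3 * hidden, hidden)
q = in_proj_weight[:hidden, :]
k = in_proj_weight[hidden : 2 * hidden, :]
v = in_proj_weight[-hidden:, :]
assert q.shape == k.shape == v.shape == (hidden, hidden)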
321
1
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=64 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = parent lowercase_ : List[str] = batch_size lowercase_ : Union[str, Any] = seq_length lowercase_ : str = is_training lowercase_ : Optional[Any] = use_input_mask lowercase_ : List[Any] = use_token_type_ids lowercase_ : Dict = use_labels lowercase_ : Optional[Any] = vocab_size lowercase_ : Optional[int] = hidden_size lowercase_ : List[str] = embedding_size lowercase_ : Optional[Any] = num_hidden_layers lowercase_ : Tuple = num_attention_heads lowercase_ : Any = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : List[str] = hidden_dropout_prob lowercase_ : Optional[int] = attention_probs_dropout_prob lowercase_ : str = max_position_embeddings lowercase_ : Optional[Any] = type_vocab_size lowercase_ : Dict = type_sequence_label_size lowercase_ : int = initializer_range lowercase_ : Any = num_labels lowercase_ : int = num_choices lowercase_ : str = scope def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : int = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Union[str, Any] = None if self.use_token_type_ids: lowercase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) lowercase_ : Tuple = None lowercase_ : Dict = None lowercase_ : List[str] = None if self.use_labels: lowercase_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : Optional[int] = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : Any = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return MobileBertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size 
,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = MobileBertModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[int] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ,token_type_ids=__UpperCamelCase ) lowercase_ : List[str] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Tuple = MobileBertForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Any = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = MobileBertForNextSentencePrediction(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Tuple = MobileBertForPreTraining(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[int] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,next_sentence_label=__UpperCamelCase ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Tuple = MobileBertForQuestionAnswering(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Any = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,start_positions=__UpperCamelCase ,end_positions=__UpperCamelCase ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase 
,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = self.num_labels lowercase_ : Optional[int] = MobileBertForSequenceClassification(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = self.num_labels lowercase_ : Optional[int] = MobileBertForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Any = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : int = self.num_choices lowercase_ : Tuple = MobileBertForMultipleChoice(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,token_type_ids=__UpperCamelCase ,labels=__UpperCamelCase ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : List[str] = config_and_inputs lowercase_ : Union[str, Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) lowercase = ( { 'feature-extraction': MobileBertModel, 'fill-mask': MobileBertForMaskedLM, 'question-answering': MobileBertForQuestionAnswering, 'text-classification': MobileBertForSequenceClassification, 'token-classification': MobileBertForTokenClassification, 'zero-shot': MobileBertForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=False ) -> Dict: '''simple docstring''' lowercase_ : int = super()._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ,return_labels=__UpperCamelCase ) if return_labels: if model_class in get_values(__UpperCamelCase ): lowercase_ : Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long 
,device=__UpperCamelCase ) lowercase_ : Optional[int] = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=__UpperCamelCase ) return inputs_dict def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Dict = MobileBertModelTester(self ) lowercase_ : Any = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ): return torch.tensor( __SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE , ) __SCREAMING_SNAKE_CASE =1E-3 @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Any = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(__UpperCamelCase ) lowercase_ : int = _long_tensor([[101, 7110, 1005, 1056, 2023, 1_1333, 1_7413, 1029, 102]] ) with torch.no_grad(): lowercase_ : List[Any] = model(__UpperCamelCase )[0] lowercase_ : Union[str, Any] = torch.Size((1, 9, 512) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [ [ [-2.4736526e07, 8.2691656e04, 1.6521838e05], [-5.7541704e-01, 3.9056022e00, 4.4011507e00], [2.6047359e00, 1.5677652e00, -1.7324188e-01], ] ] ,device=__UpperCamelCase ,) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. 
We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lowercase_ : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) lowercase_ : Dict = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
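# --- A small standalone sketch of the ratio-based tolerance check used above:
# when values span many orders of magnitude, compare expected / actual against
# 1 +/- tolerance instead of taking an absolute difference. The tensor values
# here are illustrative.
import torch

TOLERANCE = 1e-3
expected = torch.tensor([1.0e8, 2.5, -3.0e-1])
actual = expected * (1 + 5e-4)  # within relative tolerance
ratio = expected / actual
assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)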
321
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
1
"""simple docstring""" import argparse import fairseq import torch from torch import nn from transformers import ( MBartaaTokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaModel, logging, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } __SCREAMING_SNAKE_CASE =[ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple ): for attribute in key.split('.' ): lowercase_ : Tuple = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if weight_type is not None: lowercase_ : str = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).shape else: lowercase_ : int = hf_pointer.shape assert hf_shape == value.shape, ( F'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' F''' {value.shape} for {full_name}''' ) if weight_type == "weight": lowercase_ : Tuple = value elif weight_type == "weight_g": lowercase_ : Union[str, Any] = value elif weight_type == "weight_v": lowercase_ : List[Any] = value elif weight_type == "bias": lowercase_ : Tuple = value else: lowercase_ : Dict = value logger.info(F'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : Any = [] lowercase_ : Any = fairseq_model.state_dict() lowercase_ : List[str] = hf_model.feature_extractor lowercase_ : Optional[int] = hf_model.adapter for name, value in fairseq_dict.items(): lowercase_ : int = False if "conv_layers" in name: load_conv_layer( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == 'group' , ) lowercase_ : Optional[Any] = True elif any(x in name for x in ['adaptor', 'w2v_encoder.proj.', 'w2v_proj_ln.'] ): load_adapter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Any = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]: lowercase_ : Optional[int] = True if "*" in mapped_key: lowercase_ : List[str] = name.split(__SCREAMING_SNAKE_CASE )[0].split('.' 
)[-2] lowercase_ : Union[str, Any] = mapped_key.replace('*' , __SCREAMING_SNAKE_CASE ) if "weight_g" in name: lowercase_ : Union[str, Any] = 'weight_g' elif "weight_v" in name: lowercase_ : Dict = 'weight_v' elif "bias" in name: lowercase_ : Optional[int] = 'bias' elif "weight" in name: lowercase_ : List[Any] = 'weight' else: lowercase_ : List[Any] = None set_recursively(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) continue if not is_used: unused_weights.append(__SCREAMING_SNAKE_CASE ) logger.warning(F'''Unused weights: {unused_weights}''' ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] ): lowercase_ : Any = full_name.split('conv_layers.' )[-1] lowercase_ : Optional[Any] = name.split('.' ) lowercase_ : str = int(items[0] ) lowercase_ : Optional[int] = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) lowercase_ : Union[str, Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) lowercase_ : Optional[Any] = value logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was''' " found." ) lowercase_ : Optional[Any] = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( F'''{full_name} has size {value.shape}, but''' F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' ) lowercase_ : Any = value logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : Tuple = full_name.split('adaptor.' )[-1] lowercase_ : Any = name.split('.' 
) if items[1].isdigit(): lowercase_ : int = int(items[1] ) else: lowercase_ : Any = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found.''' lowercase_ : Optional[Any] = value logger.info(F'''Adapter proj layer norm bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found.''' lowercase_ : Optional[Any] = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found.''' lowercase_ : Union[str, Any] = value logger.info(F'''Adapter proj layer bias was initialized from {full_name}.''' ) if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found.''' lowercase_ : Dict = value logger.info(F'''Adapter proj layer weight was initialized from {full_name}.''' ) elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found.''' lowercase_ : int = value logger.info(F'''Adapter layer {layer_id} bias was initialized from {full_name}.''' ) elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), F'''{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found.''' lowercase_ : str = value logger.info(F'''Adapter layer {layer_id} weight was initialized from {full_name}.''' ) else: unused_weights.append(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ , lowercase_ : Union[str, Any] = emb.weight.shape lowercase_ : Dict = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = emb.weight.data return lin_layer @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , ): lowercase_ : List[Any] = WavaVecaConfig.from_pretrained( __SCREAMING_SNAKE_CASE , add_adapter=__SCREAMING_SNAKE_CASE , adapter_stride=__SCREAMING_SNAKE_CASE , adapter_kernel_size=__SCREAMING_SNAKE_CASE , use_auth_token=__SCREAMING_SNAKE_CASE , output_hidden_size=__SCREAMING_SNAKE_CASE , ) lowercase_ : Dict = MBartConfig.from_pretrained(__SCREAMING_SNAKE_CASE ) # load model lowercase_ , lowercase_ , lowercase_ : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={ 'config_yaml': config_yaml_path, 'data': '/'.join(dict_path.split('/' )[:-1] ), 'w2v_path': checkpoint_path, 'load_pretrained_decoder_from': None, } , ) lowercase_ : int = model[0].eval() # load feature extractor lowercase_ : Any = WavaVecaFeatureExtractor.from_pretrained(__SCREAMING_SNAKE_CASE , 
use_auth_token=__SCREAMING_SNAKE_CASE ) # set weights for wav2vec2 encoder lowercase_ : Tuple = WavaVecaModel(__SCREAMING_SNAKE_CASE ) recursively_load_weights_wavaveca(model.encoder , __SCREAMING_SNAKE_CASE ) # load decoder weights lowercase_ : int = MBartForCausalLM(__SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : int = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=__SCREAMING_SNAKE_CASE ) logger.warning(F'''The following keys are missing when loading the decoder weights: {missing_keys}''' ) logger.warning(F'''The following keys are unexpected when loading the decoder weights: {unexpected_keys}''' ) lowercase_ : List[str] = SpeechEncoderDecoderModel(encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = False lowercase_ : List[str] = MBartaaTokenizer(__SCREAMING_SNAKE_CASE ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = hf_wavavec.config.to_dict() lowercase_ : Dict = tokenizer.pad_token_id lowercase_ : List[str] = tokenizer.bos_token_id lowercase_ : Any = tokenizer.eos_token_id lowercase_ : Optional[int] = 'mbart50' lowercase_ : List[str] = 'wav2vec2' lowercase_ : str = tokenizer.eos_token_id lowercase_ : Any = 25_00_04 lowercase_ : Union[str, Any] = tokenizer.eos_token_id lowercase_ : Dict = SpeechEncoderDecoderConfig.from_dict(__SCREAMING_SNAKE_CASE ) hf_wavavec.save_pretrained(__SCREAMING_SNAKE_CASE ) feature_extractor.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whether to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=25_0004, type=int, help="`decoder_start_token_id` of model config") __SCREAMING_SNAKE_CASE =parser.parse_args() convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
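# Illustrative usage: a minimal sketch of loading the exported checkpoint for
# inference. The dump folder path is hypothetical, and the canonical
# transformers class names are used in place of the mangled aliases above.
import torch
from transformers import MBart50Tokenizer, SpeechEncoderDecoderModel, Wav2Vec2FeatureExtractor

dump_dir = "./wav2vec2-mbart50"  # hypothetical --pytorch_dump_folder_path
model = SpeechEncoderDecoderModel.from_pretrained(dump_dir)
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(dump_dir)
tokenizer = MBart50Tokenizer.from_pretrained(dump_dir)

# One second of silence at 16 kHz stands in for real speech input.
inputs = feature_extractor(torch.zeros(16_000).numpy(), sampling_rate=16_000, return_tensors="pt")
generated_ids = model.generate(inputs["input_values"])
print(tokenizer.batch_decode(generated_ids, skip_special_tokens=True))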
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
"""simple docstring""" import json import os from functools import lru_cache from typing import List, Optional, Tuple import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all BART models at https://huggingface.co/models?filter=bart __SCREAMING_SNAKE_CASE ={ "vocab_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json", }, "merges_file": { "facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt", "facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt", "facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt", "facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt", "facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt", "yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt", }, } __SCREAMING_SNAKE_CASE ={ "facebook/bart-base": 1024, "facebook/bart-large": 1024, "facebook/bart-large-mnli": 1024, "facebook/bart-large-cnn": 1024, "facebook/bart-large-xsum": 1024, "yjernite/bart_eli5": 1024, } @lru_cache() def lowercase__( ): lowercase_ : List[Any] = ( list(range(ord('!' 
) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) ) ) lowercase_ : Optional[int] = bs[:] lowercase_ : List[str] = 0 for b in range(2**8 ): if b not in bs: bs.append(__SCREAMING_SNAKE_CASE ) cs.append(2**8 + n ) n += 1 lowercase_ : Any = [chr(__SCREAMING_SNAKE_CASE ) for n in cs] return dict(zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): lowercase_ : Optional[Any] = set() lowercase_ : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) lowercase_ : int = char return pairs class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : Optional[Any] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else bos_token lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else eos_token lowercase_ : Dict = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else sep_token lowercase_ : List[str] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else cls_token lowercase_ : str = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else unk_token lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else pad_token # Mask token behave like a normal word, i.e. 
include the space before it lowercase_ : Dict = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else mask_token super().__init__( errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,**__UpperCamelCase ,) with open(__UpperCamelCase ,encoding='utf-8' ) as vocab_handle: lowercase_ : List[str] = json.load(__UpperCamelCase ) lowercase_ : List[Any] = {v: k for k, v in self.encoder.items()} lowercase_ : str = errors # how to handle errors in decoding lowercase_ : int = bytes_to_unicode() lowercase_ : Optional[int] = {v: k for k, v in self.byte_encoder.items()} with open(__UpperCamelCase ,encoding='utf-8' ) as merges_handle: lowercase_ : Dict = merges_handle.read().split('\n' )[1:-1] lowercase_ : Dict = [tuple(merge.split() ) for merge in bpe_merges] lowercase_ : List[Any] = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : int = {} lowercase_ : List[Any] = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions lowercase_ : str = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' ) @property def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return len(self.encoder ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return dict(self.encoder ,**self.added_tokens_encoder ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' if token in self.cache: return self.cache[token] lowercase_ : str = tuple(__UpperCamelCase ) lowercase_ : Optional[int] = get_pairs(__UpperCamelCase ) if not pairs: return token while True: lowercase_ : List[str] = min(__UpperCamelCase ,key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase ,float('inf' ) ) ) if bigram not in self.bpe_ranks: break lowercase_ , lowercase_ : Tuple = bigram lowercase_ : Dict = [] lowercase_ : Optional[Any] = 0 while i < len(__UpperCamelCase ): try: lowercase_ : int = word.index(__UpperCamelCase ,__UpperCamelCase ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) lowercase_ : List[Any] = j if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 lowercase_ : Dict = tuple(__UpperCamelCase ) lowercase_ : List[Any] = new_word if len(__UpperCamelCase ) == 1: break else: lowercase_ : Tuple = get_pairs(__UpperCamelCase ) lowercase_ : str = ' '.join(__UpperCamelCase ) lowercase_ : Optional[int] = word return word def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Union[str, Any] = [] for token in re.findall(self.pat ,__UpperCamelCase ): lowercase_ : Any = ''.join( self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__UpperCamelCase ).split(' ' ) ) return bpe_tokens def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return self.encoder.get(__UpperCamelCase ,self.encoder.get(self.unk_token ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return 
self.decoder.get(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = ''.join(__UpperCamelCase ) lowercase_ : str = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' ,errors=self.errors ) return text def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' if not os.path.isdir(__UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ : Any = os.path.join( __UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : Any = os.path.join( __UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] ) with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as f: f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=__UpperCamelCase ,ensure_ascii=__UpperCamelCase ) + '\n' ) lowercase_ : Dict = 0 with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as writer: writer.write('#version: 0.2\n' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda __UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.''' ' Please check that the tokenizer is not corrupted!' ) lowercase_ : Any = token_index writer.write(' '.join(__UpperCamelCase ) + '\n' ) index += 1 return vocab_file, merge_file def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase_ : Union[str, Any] = [self.cls_token_id] lowercase_ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase ) if token_ids_a is None: return [1] + ([0] * len(__UpperCamelCase )) + [1] return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : Dict = [self.sep_token_id] lowercase_ : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ,**__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : str = kwargs.pop('add_prefix_space' ,self.add_prefix_space ) if (is_split_into_words or add_prefix_space) and (len(__UpperCamelCase ) > 0 and not text[0].isspace()): lowercase_ : Any = ' ' + text return (text, kwargs)
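# Illustrative round-trip, assuming the class above corresponds to the
# published BartTokenizer and using a checkpoint from the vocab map above:
from transformers import BartTokenizer

tok = BartTokenizer.from_pretrained("facebook/bart-base")
ids = tok("Hello world!").input_ids  # wrapped in <s> ... </s> by build_inputs_with_special_tokens
assert tok.decode(ids, skip_special_tokens=True) == "Hello world!"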
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
"""simple docstring""" import os import unittest from transformers import MobileBertTokenizer, MobileBertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = MobileBertTokenizer lowercase = MobileBertTokenizerFast lowercase = True lowercase = True lowercase = filter_non_english lowercase = 'google/mobilebert-uncased' def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' super().setUp() lowercase_ : Tuple = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowercase_ : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) lowercase_ : int = [ (tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped for tokenizer_def in self.tokenizers_list ] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = 'UNwant\u00E9d,running' lowercase_ : Union[str, Any] = 'unwanted, running' return input_text, output_text def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = self.tokenizer_class(self.vocab_file ) lowercase_ : Dict = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(__UpperCamelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,[9, 6, 7, 12, 10, 11] ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' if not self.test_rust_tokenizer: return lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : Union[str, Any] = self.get_rust_tokenizer() lowercase_ : Tuple = 'UNwant\u00E9d,running' lowercase_ : str = tokenizer.tokenize(__UpperCamelCase ) lowercase_ : Optional[int] = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Optional[int] = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) lowercase_ : int = rust_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Tuple = self.get_rust_tokenizer() lowercase_ : str = tokenizer.encode(__UpperCamelCase ) lowercase_ : int = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) # With lower casing lowercase_ : Tuple = self.get_tokenizer(do_lower_case=__UpperCamelCase ) lowercase_ : Optional[int] = self.get_rust_tokenizer(do_lower_case=__UpperCamelCase ) lowercase_ : Dict = 'UNwant\u00E9d,running' lowercase_ : Dict = tokenizer.tokenize(__UpperCamelCase ) lowercase_ : Optional[Any] = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Any = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) lowercase_ : Tuple = rust_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Optional[Any] = self.get_rust_tokenizer() lowercase_ : Optional[Any] = 
tokenizer.encode(__UpperCamelCase ) lowercase_ : Union[str, Any] = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Dict = BasicTokenizer() self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = BasicTokenizer(do_lower_case=__UpperCamelCase ,strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : str = BasicTokenizer(do_lower_case=__UpperCamelCase ,strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] ) self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = BasicTokenizer(do_lower_case=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = BasicTokenizer(do_lower_case=__UpperCamelCase ,strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = BasicTokenizer(do_lower_case=__UpperCamelCase ,strip_accents=__UpperCamelCase ) self.assertListEqual( tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[Any] = BasicTokenizer(do_lower_case=__UpperCamelCase ,never_split=['[UNK]'] ) self.assertListEqual( tokenizer.tokenize(' \tHeLLo!how \n Are yoU? 
[UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing'] lowercase_ : str = {} for i, token in enumerate(__UpperCamelCase ): lowercase_ : Tuple = i lowercase_ : Optional[Any] = WordpieceTokenizer(vocab=__UpperCamelCase ,unk_token='[UNK]' ) self.assertListEqual(tokenizer.tokenize('' ) ,[] ) self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] ) self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' self.assertTrue(_is_whitespace(' ' ) ) self.assertTrue(_is_whitespace('\t' ) ) self.assertTrue(_is_whitespace('\r' ) ) self.assertTrue(_is_whitespace('\n' ) ) self.assertTrue(_is_whitespace('\u00A0' ) ) self.assertFalse(_is_whitespace('A' ) ) self.assertFalse(_is_whitespace('-' ) ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.assertTrue(_is_control('\u0005' ) ) self.assertFalse(_is_control('A' ) ) self.assertFalse(_is_control(' ' ) ) self.assertFalse(_is_control('\t' ) ) self.assertFalse(_is_control('\r' ) ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' self.assertTrue(_is_punctuation('-' ) ) self.assertTrue(_is_punctuation('$' ) ) self.assertTrue(_is_punctuation('`' ) ) self.assertTrue(_is_punctuation('.' ) ) self.assertFalse(_is_punctuation('A' ) ) self.assertFalse(_is_punctuation(' ' ) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Tuple = self.get_tokenizer() lowercase_ : Optional[int] = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(__UpperCamelCase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] ) self.assertListEqual( [rust_tokenizer.tokenize(__UpperCamelCase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] ) @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : int = self.tokenizer_class.from_pretrained('google/mobilebert-uncased' ) lowercase_ : Any = tokenizer.encode('sequence builders' ,add_special_tokens=__UpperCamelCase ) lowercase_ : Dict = tokenizer.encode('multi-sequence build' ,add_special_tokens=__UpperCamelCase ) lowercase_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ) lowercase_ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ,__UpperCamelCase ) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_a + [102] def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Any = f'''A, naïve {tokenizer_r.mask_token} AllenNLP sentence.''' lowercase_ : Optional[int] = tokenizer_r.encode_plus( __UpperCamelCase ,return_attention_mask=__UpperCamelCase ,return_token_type_ids=__UpperCamelCase ,return_offsets_mapping=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,) lowercase_ : Optional[Any] = tokenizer_r.do_lower_case if hasattr(__UpperCamelCase ,'do_lower_case' ) else False lowercase_ : int = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'A'), ((1, 2), 
','), ((3, 5), 'na'), ((5, 6), '##ï'), ((6, 8), '##ve'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'Allen'), ((21, 23), '##NL'), ((23, 24), '##P'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), 'a'), ((1, 2), ','), ((3, 8), 'naive'), ((9, 15), tokenizer_r.mask_token), ((16, 21), 'allen'), ((21, 23), '##nl'), ((23, 24), '##p'), ((25, 33), 'sentence'), ((33, 34), '.'), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) ) self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[int] = ['的', '人', '有'] lowercase_ : Optional[int] = ''.join(__UpperCamelCase ) for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Union[str, Any] = True lowercase_ : List[Any] = self.tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Tuple = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Dict = tokenizer_p.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) lowercase_ : int = tokenizer_r.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) lowercase_ : int = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase ) lowercase_ : Tuple = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase ) # it is expected that each Chinese character is not preceded by "##" self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Union[str, Any] = False lowercase_ : Tuple = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : int = self.tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = tokenizer_r.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) lowercase_ : List[str] = tokenizer_p.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) lowercase_ : Any = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase ) lowercase_ : str = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase ) # it is expected that only the first Chinese character is not preceded by "##". lowercase_ : Optional[Any] = [ f'''##{token}''' if idx != 0 else token for idx, token in enumerate(__UpperCamelCase ) ] self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
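# Illustrative check of the WordPiece behaviour the tests above rely on:
# greedy longest-match-first segmentation with "##" continuation pieces. The
# small vocab mirrors the one built in setUp.
from transformers.models.bert.tokenization_bert import WordpieceTokenizer

vocab = {t: i for i, t in enumerate(["[UNK]", "un", "##want", "##ed", "runn", "##ing"])}
wp = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
assert wp.tokenize("unwanted running") == ["un", "##want", "##ed", "runn", "##ing"]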
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
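# Illustrative check, assuming this file corresponds to the published
# BlenderbotTokenizerFast: build_inputs_with_special_tokens above terminates a
# single sequence with one </s> token.
from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
ids = tok(" Hello there.").input_ids
assert ids[-1] == tok.eos_token_id  # single </s> terminator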
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : str = [0 for i in range(len(__SCREAMING_SNAKE_CASE ) )] # initialize interval's left pointer and right pointer lowercase_ , lowercase_ : List[str] = 0, 0 for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ): # case when current index is inside the interval if i <= right_pointer: lowercase_ : int = min(right_pointer - i + 1 , z_result[i - left_pointer] ) lowercase_ : Tuple = min_edge while go_next(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): z_result[i] += 1 # if new index's result gives us more right interval, # we've to update left_pointer and right_pointer if i + z_result[i] - 1 > right_pointer: lowercase_ , lowercase_ : List[Any] = i, i + z_result[i] - 1 return z_result def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : str ): return i + z_result[i] < len(__SCREAMING_SNAKE_CASE ) and s[z_result[i]] == s[i + z_result[i]] def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = 0 # concatenate 'pattern' and 'input_str' and call z_function # with concatenated string lowercase_ : Tuple = z_function(pattern + input_str ) for val in z_result: # if value is greater then length of the pattern string # that means this index is starting position of substring # which is equal to pattern string if val >= len(__SCREAMING_SNAKE_CASE ): answer += 1 return answer if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "microsoft/swinv2-tiny-patch4-window8-256": ( "https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json" ), } class UpperCamelCase ( lowercase_ ): lowercase = 'swinv2' lowercase = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self ,__UpperCamelCase=224 ,__UpperCamelCase=4 ,__UpperCamelCase=3 ,__UpperCamelCase=96 ,__UpperCamelCase=[2, 2, 6, 2] ,__UpperCamelCase=[3, 6, 12, 24] ,__UpperCamelCase=7 ,__UpperCamelCase=4.0 ,__UpperCamelCase=True ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.0 ,__UpperCamelCase=0.1 ,__UpperCamelCase="gelu" ,__UpperCamelCase=False ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-5 ,__UpperCamelCase=32 ,**__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' super().__init__(**__UpperCamelCase ) lowercase_ : str = image_size lowercase_ : List[Any] = patch_size lowercase_ : Any = num_channels lowercase_ : int = embed_dim lowercase_ : Optional[int] = depths lowercase_ : List[str] = len(__UpperCamelCase ) lowercase_ : int = num_heads lowercase_ : Union[str, Any] = window_size lowercase_ : str = mlp_ratio lowercase_ : int = qkv_bias lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Dict = drop_path_rate lowercase_ : Dict = hidden_act lowercase_ : List[Any] = use_absolute_embeddings lowercase_ : str = layer_norm_eps lowercase_ : Dict = initializer_range lowercase_ : List[Any] = encoder_stride # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model lowercase_ : Optional[Any] = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) ) lowercase_ : List[Any] = (0, 0, 0, 0)
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_squeezebert import SqueezeBertTokenizer __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt" ), }, "tokenizer_file": { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli": ( "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json" ), "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json" ), }, } __SCREAMING_SNAKE_CASE ={ "squeezebert/squeezebert-uncased": 512, "squeezebert/squeezebert-mnli": 512, "squeezebert/squeezebert-mnli-headless": 512, } __SCREAMING_SNAKE_CASE ={ "squeezebert/squeezebert-uncased": {"do_lower_case": True}, "squeezebert/squeezebert-mnli": {"do_lower_case": True}, "squeezebert/squeezebert-mnli-headless": {"do_lower_case": True}, } class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = SqueezeBertTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=True ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase=True ,__UpperCamelCase=None ,**__UpperCamelCase ,) -> int: '''simple docstring''' super().__init__( __UpperCamelCase ,tokenizer_file=__UpperCamelCase ,do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,tokenize_chinese_chars=__UpperCamelCase ,strip_accents=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get('lowercase' ,__UpperCamelCase ) != do_lower_case or normalizer_state.get('strip_accents' ,__UpperCamelCase ) != strip_accents or normalizer_state.get('handle_chinese_chars' ,__UpperCamelCase ) != tokenize_chinese_chars ): lowercase_ : List[str] = getattr(__UpperCamelCase ,normalizer_state.pop('type' ) ) lowercase_ : List[str] = do_lower_case lowercase_ : str = strip_accents lowercase_ : Optional[Any] = tokenize_chinese_chars lowercase_ : Optional[Any] = normalizer_class(**__UpperCamelCase ) lowercase_ : Any = do_lower_case def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> int: '''simple docstring''' lowercase_ : Tuple = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : List[str] = [self.sep_token_id] lowercase_ : List[Any] = [self.cls_token_id] 
if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Optional[int] = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase )
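# Illustrative segment-id check, assuming the class above corresponds to the
# published SqueezeBertTokenizerFast: zeros cover [CLS] A [SEP] and ones cover
# B [SEP], matching create_token_type_ids_from_sequences.
from transformers import SqueezeBertTokenizerFast

tok = SqueezeBertTokenizerFast.from_pretrained("squeezebert/squeezebert-uncased")
enc = tok("first sentence", "second sentence")
assert enc["token_type_ids"][0] == 0 and enc["token_type_ids"][-1] == 1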
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
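
# Extra usage sketch (not part of the original demo): since `remove` always pops
# the root, draining the heap yields values in ascending order.
def _heap_sort_demo(nodes):
    heap = MinHeap(list(nodes))
    return [heap.remove().val for _ in range(len(heap.heap))]


# _heap_sort_demo([Node("p", 5), Node("q", 2), Node("s", 9)]) -> [2, 5, 9]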
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int = 10_00 ): lowercase_ : Optional[Any] = -1 lowercase_ : Optional[int] = 0 for a in range(1 , n // 3 ): # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c lowercase_ : List[Any] = (n * n - 2 * a * n) // (2 * n - 2 * a) lowercase_ : Dict = n - a - b if c * c == (a * a + b * b): lowercase_ : List[str] = a * b * c if candidate >= product: lowercase_ : str = candidate return product if __name__ == "__main__": print(F"{solution() = }")
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
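
# Hedged sketch of the save/load round-trip these tests exercise; `tmpdir` is a
# hypothetical directory path supplied by the caller.
def _processor_roundtrip_demo(processor, tmpdir):
    processor.save_pretrained(tmpdir)
    return CLIPSegProcessor.from_pretrained(tmpdir)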
"""simple docstring""" import requests from bsa import BeautifulSoup def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus" ): lowercase_ : int = BeautifulSoup(requests.get(__SCREAMING_SNAKE_CASE ).text , 'html.parser' ) lowercase_ : List[str] = soup.findAll('h1' ) lowercase_ : List[str] = soup.findAll('div' , {'class': 'maincounter-number'} ) keys += soup.findAll('span' , {'class': 'panel-title'} ) values += soup.findAll('div' , {'class': 'number-table-main'} ) return {key.text.strip(): value.text.strip() for key, value in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )} if __name__ == "__main__": print("\033[1m" + "COVID-19 Status of the World" + "\033[0m\n") for key, value in world_covidaa_stats().items(): print(F"{key}\n{value}\n")
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring""" import unittest from transformers import RoFormerTokenizer, RoFormerTokenizerFast from transformers.testing_utils import require_rjieba, require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_rjieba @require_tokenizers class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = RoFormerTokenizer lowercase = RoFormerTokenizerFast lowercase = True lowercase = True def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' super().setUp() def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return self.rust_tokenizer_class.from_pretrained('junnyu/roformer_chinese_base' ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = '永和服装饰品有限公司,今天天气非常好' lowercase_ : int = '永和 服装 饰品 有限公司 , 今 天 天 气 非常 好' return input_text, output_text def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = self.get_tokenizer() lowercase_ , lowercase_ : str = self.get_chinese_input_output_texts() lowercase_ : Any = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,output_text.split() ) lowercase_ : Optional[int] = tokens + [tokenizer.unk_token] lowercase_ : Union[str, Any] = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Tuple = self.get_rust_tokenizer() lowercase_ , lowercase_ : int = self.get_chinese_input_output_texts() lowercase_ : Union[str, Any] = tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,output_text.split() ) lowercase_ : str = tokens + [tokenizer.unk_token] lowercase_ : Optional[Any] = [2_2943, 2_1332, 3_4431, 4_5904, 117, 306, 1231, 1231, 2653, 3_3994, 1266, 100] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' pass
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
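
# Hedged sketch (not part of the test suite) of the cached-decoding equivalence
# the tests above verify: one full forward pass should match an incremental pass
# that reuses past_key_values, up to numerical tolerance.
def _cache_equivalence_demo(decoder, input_ids):
    full_logits = decoder(input_ids).logits[:, -1]
    past = decoder(input_ids[:, :-1], use_cache=True).past_key_values
    step_logits = decoder(input_ids[:, -1:], past_key_values=past).logits[:, -1]
    return torch.allclose(full_logits, step_logits, atol=1e-3)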
"""simple docstring""" import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) # pylint: disable=invalid-name __SCREAMING_SNAKE_CASE =256 class UpperCamelCase ( lowercase_ ): lowercase = ['melgan'] def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> None: '''simple docstring''' super().__init__() # From MELGAN lowercase_ : Dict = math.log(1e-5 ) # Matches MelGAN training. lowercase_ : int = 4.0 # Largest value for most examples lowercase_ : List[str] = 128 self.register_modules( notes_encoder=__UpperCamelCase ,continuous_encoder=__UpperCamelCase ,decoder=__UpperCamelCase ,scheduler=__UpperCamelCase ,melgan=__UpperCamelCase ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=(-1.0, 1.0) ,__UpperCamelCase=False ) -> int: '''simple docstring''' lowercase_ , lowercase_ : Dict = output_range if clip: lowercase_ : Optional[int] = torch.clip(__UpperCamelCase ,self.min_value ,self.max_value ) # Scale to [0, 1]. lowercase_ : Optional[int] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. return zero_one * (max_out - min_out) + min_out def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=(-1.0, 1.0) ,__UpperCamelCase=False ) -> Union[str, Any]: '''simple docstring''' lowercase_ , lowercase_ : List[Any] = input_range lowercase_ : int = torch.clip(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) if clip else outputs # Scale to [0, 1]. lowercase_ : List[str] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. 
return zero_one * (self.max_value - self.min_value) + self.min_value def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = input_tokens > 0 lowercase_ , lowercase_ : Dict = self.notes_encoder( encoder_input_tokens=__UpperCamelCase ,encoder_inputs_mask=__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.continuous_encoder( encoder_inputs=__UpperCamelCase ,encoder_inputs_mask=__UpperCamelCase ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Dict = noise_time if not torch.is_tensor(__UpperCamelCase ): lowercase_ : List[Any] = torch.tensor([timesteps] ,dtype=torch.long ,device=input_tokens.device ) elif torch.is_tensor(__UpperCamelCase ) and len(timesteps.shape ) == 0: lowercase_ : int = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase_ : Optional[int] = timesteps * torch.ones(input_tokens.shape[0] ,dtype=timesteps.dtype ,device=timesteps.device ) lowercase_ : int = self.decoder( encodings_and_masks=__UpperCamelCase ,decoder_input_tokens=__UpperCamelCase ,decoder_noise_time=__UpperCamelCase ) return logits @torch.no_grad() def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = 100 ,__UpperCamelCase = True ,__UpperCamelCase = "numpy" ,__UpperCamelCase = None ,__UpperCamelCase = 1 ,) -> Union[AudioPipelineOutput, Tuple]: '''simple docstring''' if (callback_steps is None) or ( callback_steps is not None and (not isinstance(__UpperCamelCase ,__UpperCamelCase ) or callback_steps <= 0) ): raise ValueError( f'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' f''' {type(__UpperCamelCase )}.''' ) lowercase_ : Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] ,dtype=np.floataa ) lowercase_ : int = np.zeros([1, 0, self.n_dims] ,np.floataa ) lowercase_ : Optional[Any] = torch.ones((1, TARGET_FEATURE_LENGTH) ,dtype=__UpperCamelCase ,device=self.device ) for i, encoder_input_tokens in enumerate(__UpperCamelCase ): if i == 0: lowercase_ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device ,dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase_ : str = torch.zeros((1, TARGET_FEATURE_LENGTH) ,dtype=__UpperCamelCase ,device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase_ : Dict = ones lowercase_ : List[str] = self.scale_features( __UpperCamelCase ,output_range=[-1.0, 1.0] ,clip=__UpperCamelCase ) lowercase_ : List[Any] = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) ,continuous_inputs=__UpperCamelCase ,continuous_mask=__UpperCamelCase ,) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase_ : str = randn_tensor( shape=encoder_continuous_inputs.shape ,generator=__UpperCamelCase ,device=self.device ,dtype=self.decoder.dtype ,) # set step values self.scheduler.set_timesteps(__UpperCamelCase ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase_ : List[Any] = self.decode( encodings_and_masks=__UpperCamelCase ,input_tokens=__UpperCamelCase ,noise_time=t / self.scheduler.config.num_train_timesteps ,) # Compute previous output: x_t -> x_t-1 lowercase_ : List[str] = self.scheduler.step(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,generator=__UpperCamelCase ).prev_sample lowercase_ : Tuple = self.scale_to_features(__UpperCamelCase ,input_range=[-1.0, 1.0] ) lowercase_ : Optional[int] = mel[:1] lowercase_ : List[Any] = mel.cpu().float().numpy() lowercase_ : Optional[Any] = np.concatenate([full_pred_mel, pred_mel[:1]] ,axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(__UpperCamelCase ,__UpperCamelCase ) logger.info('Generated segment' ,__UpperCamelCase ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( 'Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.' ) elif output_type == "numpy" and self.melgan is None: raise ValueError( 'Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.' ) if output_type == "numpy": lowercase_ : int = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase_ : int = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=__UpperCamelCase )
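
# Hedged sketch (not in the original file): the two scaling helpers above, named
# scale_features and scale_to_features upstream, are intended to invert each
# other for features already inside [min_value, max_value], which the denoising
# loop relies on when feeding predictions back as context.
def _scaling_roundtrip_demo(pipe, features):
    scaled = pipe.scale_features(features, output_range=[-1.0, 1.0])
    return pipe.scale_to_features(scaled, input_range=[-1.0, 1.0])  # ~= features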
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
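
# Small runnable illustration (not part of the test class) of the helper checked
# in the last test above: the identifier is inserted before the file extension.
def _identified_filename_demo():
    generated = generate_identified_filename(Path("/tmp/model.onnx"), "-quantized")
    return generated.as_posix()  # "/tmp/model-quantized.onnx"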
"""simple docstring""" import asyncio import os import shutil import subprocess import sys import tempfile import unittest from distutils.util import strtobool from functools import partial from pathlib import Path from typing import List, Union from unittest import mock import torch from ..state import AcceleratorState, PartialState from ..utils import ( gather, is_bnb_available, is_comet_ml_available, is_datasets_available, is_deepspeed_available, is_mps_available, is_safetensors_available, is_tensorboard_available, is_torch_version, is_tpu_available, is_transformers_available, is_wandb_available, is_xpu_available, ) def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any]=False ): try: lowercase_ : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase_ : List[Any] = default else: # KEY is set, convert it to True or False. try: lowercase_ : Dict = strtobool(__SCREAMING_SNAKE_CASE ) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(F'''If set, {key} must be yes or no.''' ) return _value __SCREAMING_SNAKE_CASE =parse_flag_from_env("RUN_SLOW", default=False) def lowercase__( __SCREAMING_SNAKE_CASE : int ): return unittest.skip('Test was skipped' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : str ): return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int ): return unittest.skipUnless( is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : str ): return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ): return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Any ): return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Any ): return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Any ): return 
unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] ): return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Tuple=None ): if test_case is None: return partial(__SCREAMING_SNAKE_CASE , version=__SCREAMING_SNAKE_CASE ) return unittest.skipUnless(is_torch_version('>=' , __SCREAMING_SNAKE_CASE ) , F'''test requires torch version >= {version}''' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Tuple ): return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] ): return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__SCREAMING_SNAKE_CASE ) __SCREAMING_SNAKE_CASE =( any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available() ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] ): return unittest.skipUnless( _atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__SCREAMING_SNAKE_CASE ) class UpperCamelCase ( unittest.TestCase ): lowercase = True @classmethod def _UpperCAmelCase ( cls ) -> int: '''simple docstring''' lowercase_ : Optional[int] = tempfile.mkdtemp() @classmethod def _UpperCAmelCase ( cls ) -> int: '''simple docstring''' if os.path.exists(cls.tmpdir ): shutil.rmtree(cls.tmpdir ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if self.clear_on_setup: for path in Path(self.tmpdir ).glob('**/*' ): if path.is_file(): path.unlink() elif path.is_dir(): shutil.rmtree(__UpperCamelCase ) class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> int: '''simple docstring''' super().tearDown() # Reset the state of the AcceleratorState singleton. 
AcceleratorState._reset_state() PartialState._reset_state() class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = mocks if isinstance(__UpperCamelCase ,(tuple, list) ) else [mocks] for m in self.mocks: m.start() self.addCleanup(m.stop ) def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] ): lowercase_ : int = AcceleratorState() lowercase_ : int = tensor[None].clone().to(state.device ) lowercase_ : List[str] = gather(__SCREAMING_SNAKE_CASE ).cpu() lowercase_ : str = tensor[0].cpu() for i in range(tensors.shape[0] ): if not torch.equal(tensors[i] , __SCREAMING_SNAKE_CASE ): return False return True class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = returncode lowercase_ : Optional[Any] = stdout lowercase_ : int = stderr async def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[int] ): while True: lowercase_ : Dict = await stream.readline() if line: callback(__SCREAMING_SNAKE_CASE ) else: break async def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=None , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Dict=False ): if echo: print('\nRunning: ' , ' '.join(__SCREAMING_SNAKE_CASE ) ) lowercase_ : int = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=__SCREAMING_SNAKE_CASE , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__SCREAMING_SNAKE_CASE , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase_ : Any = [] lowercase_ : str = [] def tee(__SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : int="" ): lowercase_ : Optional[Any] = line.decode('utf-8' ).rstrip() sink.append(__SCREAMING_SNAKE_CASE ) if not quiet: print(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , file=__SCREAMING_SNAKE_CASE ) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ asyncio.create_task(_read_stream(p.stdout , lambda __SCREAMING_SNAKE_CASE : tee(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sys.stdout , label='stdout:' ) ) ), asyncio.create_task(_read_stream(p.stderr , lambda __SCREAMING_SNAKE_CASE : tee(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sys.stderr , label='stderr:' ) ) ), ] , timeout=__SCREAMING_SNAKE_CASE , ) return _RunOutput(await p.wait() , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : Dict=None , __SCREAMING_SNAKE_CASE : Optional[Any]=1_80 , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Optional[Any]=True ): lowercase_ : Optional[int] = asyncio.get_event_loop() lowercase_ : List[Any] = loop.run_until_complete( _stream_subprocess(__SCREAMING_SNAKE_CASE , env=__SCREAMING_SNAKE_CASE , stdin=__SCREAMING_SNAKE_CASE , timeout=__SCREAMING_SNAKE_CASE , quiet=__SCREAMING_SNAKE_CASE , echo=__SCREAMING_SNAKE_CASE ) ) lowercase_ : Optional[Any] = ' '.join(__SCREAMING_SNAKE_CASE ) if result.returncode > 0: lowercase_ : int = '\n'.join(result.stderr ) raise RuntimeError( F'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' F'''The combined stderr from workers follows:\n{stderr}''' ) return result class UpperCamelCase ( lowercase_ ): pass def lowercase__( __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): try: lowercase_ : Optional[int] = subprocess.check_output(__SCREAMING_SNAKE_CASE , stderr=subprocess.STDOUT ) if return_stdout: if hasattr(__SCREAMING_SNAKE_CASE , 'decode' ): lowercase_ : Any = output.decode('utf-8' ) return output except subprocess.CalledProcessError as e: raise SubprocessCallException( F'''Command `{" ".join(__SCREAMING_SNAKE_CASE )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
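
# Usage sketch (illustrative only): the env-flag parser defined at the top of
# this module is named parse_flag_from_env upstream; setting RUN_SLOW=yes in the
# environment makes the @slow-decorated tests run.
def _flag_demo():
    return parse_flag_from_env("RUN_SLOW", default=False)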
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
"""simple docstring""" import argparse import requests import torch # pip3 install salesforce-lavis # I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis from lavis.models import load_model_and_preprocess from PIL import Image from transformers import ( AutoTokenizer, BlipaConfig, BlipaForConditionalGeneration, BlipaProcessor, BlipaVisionConfig, BlipImageProcessor, OPTConfig, TaConfig, ) from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD def lowercase__( ): lowercase_ : str = 'https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png' lowercase_ : List[str] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert('RGB' ) return image def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): lowercase_ : Union[str, Any] = [] # fmt: off # vision encoder rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') ) rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') ) rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') ) rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') ) rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') ) rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') ) for i in range(config.vision_config.num_hidden_layers ): rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.weight''', F'''vision_model.encoder.layers.{i}.layer_norm1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm1.bias''', F'''vision_model.encoder.layers.{i}.layer_norm1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.weight''', F'''vision_model.encoder.layers.{i}.layer_norm2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.norm2.bias''', F'''vision_model.encoder.layers.{i}.layer_norm2.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.qkv.weight''', F'''vision_model.encoder.layers.{i}.self_attn.qkv.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.weight''', F'''vision_model.encoder.layers.{i}.self_attn.projection.weight''',) ) rename_keys.append((F'''visual_encoder.blocks.{i}.attn.proj.bias''', F'''vision_model.encoder.layers.{i}.self_attn.projection.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc1.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc1.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc1.bias''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.weight''', F'''vision_model.encoder.layers.{i}.mlp.fc2.weight''') ) rename_keys.append((F'''visual_encoder.blocks.{i}.mlp.fc2.bias''', F'''vision_model.encoder.layers.{i}.mlp.fc2.bias''') ) # QFormer rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.layernorm.weight') ) rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.layernorm.bias') ) # fmt: on return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : int = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = val def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] ): for i in range(config.vision_config.num_hidden_layers ): # read in 
original q and v biases lowercase_ : Any = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.q_bias''' ) lowercase_ : List[str] = state_dict.pop(F'''visual_encoder.blocks.{i}.attn.v_bias''' ) # next, set bias in the state dict lowercase_ : Tuple = torch.cat((q_bias, torch.zeros_like(__SCREAMING_SNAKE_CASE , requires_grad=__SCREAMING_SNAKE_CASE ), v_bias) ) lowercase_ : str = qkv_bias def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int ): lowercase_ : str = 3_64 if 'coco' in model_name else 2_24 lowercase_ : int = BlipaVisionConfig(image_size=__SCREAMING_SNAKE_CASE ).to_dict() # make sure the models have proper bos_token_id and eos_token_id set (important for generation) # seems like flan-T5 models don't have bos_token_id properly set? if "opt-2.7b" in model_name: lowercase_ : Tuple = OPTConfig.from_pretrained('facebook/opt-2.7b' , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict() elif "opt-6.7b" in model_name: lowercase_ : Optional[int] = OPTConfig.from_pretrained('facebook/opt-6.7b' , eos_token_id=__SCREAMING_SNAKE_CASE ).to_dict() elif "t5-xl" in model_name: lowercase_ : int = TaConfig.from_pretrained('google/flan-t5-xl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() elif "t5-xxl" in model_name: lowercase_ : Dict = TaConfig.from_pretrained('google/flan-t5-xxl' , dense_act_fn='gelu' , bos_token_id=1 ).to_dict() lowercase_ : List[Any] = BlipaConfig(vision_config=__SCREAMING_SNAKE_CASE , text_config=__SCREAMING_SNAKE_CASE ) return config, image_size @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ): lowercase_ : Dict = ( AutoTokenizer.from_pretrained('facebook/opt-2.7b' ) if 'opt' in model_name else AutoTokenizer.from_pretrained('google/flan-t5-xl' ) ) lowercase_ : Any = tokenizer('\n' , add_special_tokens=__SCREAMING_SNAKE_CASE ).input_ids[0] lowercase_ , lowercase_ : Any = get_blipa_config(__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = BlipaForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval() lowercase_ : str = { 'blip2-opt-2.7b': ('blip2_opt', 'pretrain_opt2.7b'), 'blip2-opt-6.7b': ('blip2_opt', 'pretrain_opt6.7b'), 'blip2-opt-2.7b-coco': ('blip2_opt', 'caption_coco_opt2.7b'), 'blip2-opt-6.7b-coco': ('blip2_opt', 'caption_coco_opt6.7b'), 'blip2-flan-t5-xl': ('blip2_t5', 'pretrain_flant5xl'), 'blip2-flan-t5-xl-coco': ('blip2_t5', 'caption_coco_flant5xl'), 'blip2-flan-t5-xxl': ('blip2_t5', 'pretrain_flant5xxl'), } lowercase_ , lowercase_ : Any = model_name_to_original[model_name] # load original model print('Loading original model...' ) lowercase_ : Union[str, Any] = 'cuda' if torch.cuda.is_available() else 'cpu' lowercase_ , lowercase_ , lowercase_ : List[str] = load_model_and_preprocess( name=__SCREAMING_SNAKE_CASE , model_type=__SCREAMING_SNAKE_CASE , is_eval=__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) original_model.eval() print('Done!' 
) # update state dict keys lowercase_ : Dict = original_model.state_dict() lowercase_ : List[str] = create_rename_keys(__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # some keys can be renamed efficiently for key, val in state_dict.copy().items(): lowercase_ : Any = state_dict.pop(__SCREAMING_SNAKE_CASE ) if key.startswith('Qformer.bert' ): lowercase_ : Optional[Any] = key.replace('Qformer.bert' , 'qformer' ) if "attention.self" in key: lowercase_ : Optional[int] = key.replace('self' , 'attention' ) if "opt_proj" in key: lowercase_ : List[Any] = key.replace('opt_proj' , 'language_projection' ) if "t5_proj" in key: lowercase_ : List[Any] = key.replace('t5_proj' , 'language_projection' ) if key.startswith('opt' ): lowercase_ : Dict = key.replace('opt' , 'language' ) if key.startswith('t5' ): lowercase_ : Tuple = key.replace('t5' , 'language' ) lowercase_ : Union[str, Any] = val # read in qv biases read_in_q_v_bias(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : Tuple = hf_model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) assert len(__SCREAMING_SNAKE_CASE ) == 0 assert unexpected_keys == ["qformer.embeddings.position_ids"] lowercase_ : Tuple = load_demo_image() lowercase_ : Any = vis_processors['eval'](__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = tokenizer(['\n'] , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE ) # create processor lowercase_ : List[Any] = BlipImageProcessor( size={'height': image_size, 'width': image_size} , image_mean=__SCREAMING_SNAKE_CASE , image_std=__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = BlipaProcessor(image_processor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values.to(__SCREAMING_SNAKE_CASE ) # make sure processor creates exact same pixel values assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) original_model.to(__SCREAMING_SNAKE_CASE ) hf_model.to(__SCREAMING_SNAKE_CASE ) with torch.no_grad(): if "opt" in model_name: lowercase_ : Any = original_model({'image': original_pixel_values, 'text_input': ['']} ).logits lowercase_ : str = hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).logits else: lowercase_ : str = original_model( {'image': original_pixel_values, 'text_input': ['\n'], 'text_output': ['\n']} ).logits lowercase_ : int = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 ) lowercase_ : Optional[Any] = hf_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ).logits assert original_logits.shape == logits.shape print('First values of original logits:' , original_logits[0, :3, :3] ) print('First values of HF logits:' , logits[0, :3, :3] ) # assert values if model_name == "blip2-flan-t5-xl": lowercase_ : Union[str, Any] = torch.tensor( [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__SCREAMING_SNAKE_CASE ) assert torch.allclose(logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) elif model_name == "blip2-flan-t5-xl-coco": lowercase_ : Optional[int] = torch.tensor( [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__SCREAMING_SNAKE_CASE ) else: # cast to same type lowercase_ : List[Any] = logits.dtype assert torch.allclose(original_logits.to(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , atol=1E-2 ) 
print('Looks ok!' ) print('Generating a caption...' ) lowercase_ : Union[str, Any] = '' lowercase_ : Optional[Any] = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors='pt' ).input_ids.to(__SCREAMING_SNAKE_CASE ) lowercase_ : List[Any] = original_model.generate({'image': original_pixel_values} ) lowercase_ : Any = hf_model.generate( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , do_sample=__SCREAMING_SNAKE_CASE , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , ) print('Original generation:' , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = input_ids.shape[1] lowercase_ : Optional[int] = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = [text.strip() for text in output_text] print('HF generation:' , __SCREAMING_SNAKE_CASE ) if pytorch_dump_folder_path is not None: processor.save_pretrained(__SCREAMING_SNAKE_CASE ) hf_model.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: processor.push_to_hub(F'''nielsr/{model_name}''' ) hf_model.push_to_hub(F'''nielsr/{model_name}''' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() __SCREAMING_SNAKE_CASE =[ "blip2-opt-2.7b", "blip2-opt-6.7b", "blip2-opt-2.7b-coco", "blip2-opt-6.7b-coco", "blip2-flan-t5-xl", "blip2-flan-t5-xl-coco", "blip2-flan-t5-xxl", ] parser.add_argument( "--model_name", default="blip2-opt-2.7b", choices=choices, type=str, help="Path to hf config.json of model to convert", ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether to push the model and processor to the hub after converting", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
321
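# The conversion script above hinges on rewriting checkpoint keys in place: each
# (src, dest) pair is applied by popping the old entry and re-inserting its tensor
# under the new name, so exactly one copy of each weight survives. A minimal sketch
# of that pattern on a toy state dict; the key names here are illustrative, not the
# full LAVIS mapping.
import torch

state_dict = {
    "visual_encoder.cls_token": torch.zeros(1, 1, 8),
    "ln_vision.weight": torch.ones(8),
}

rename_keys = [
    ("visual_encoder.cls_token", "vision_model.embeddings.class_embedding"),
    ("ln_vision.weight", "vision_model.post_layernorm.weight"),
]


def rename_key(dct, old, new):
    # pop moves the tensor to the new key without duplicating it
    dct[new] = dct.pop(old)


for src, dest in rename_keys:
    rename_key(state_dict, src, dest)

print(sorted(state_dict))  # only the new names remain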
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done sucessfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
321
1
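# All four loader helpers above apply the same rule: TensorFlow stores dense
# kernels as (in_features, out_features) while torch.nn.Linear.weight is
# (out_features, in_features), so any variable whose name contains "kernel" is
# transposed before torch.from_numpy. A self-contained sketch of just that rule;
# a random array stands in for the tf.train.load_variable output, and the helper
# name to_linear_weight is ours.
import numpy as np
import torch


def to_linear_weight(name: str, array: np.ndarray) -> torch.Tensor:
    # TF kernels are (in, out); PyTorch Linear weights are (out, in)
    if "kernel" in name:
        array = array.transpose()
    return torch.from_numpy(array)


tf_kernel = np.random.rand(768, 3072).astype(np.float32)  # stand-in for a loaded TF variable
weight = to_linear_weight("_intermediate_dense/kernel", tf_kernel)
assert weight.shape == (3072, 768)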
"""simple docstring""" import argparse import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## __SCREAMING_SNAKE_CASE =16 __SCREAMING_SNAKE_CASE =32 def lowercase__( __SCREAMING_SNAKE_CASE : Accelerator , __SCREAMING_SNAKE_CASE : int = 16 ): lowercase_ : Union[str, Any] = AutoTokenizer.from_pretrained('bert-base-cased' ) lowercase_ : Optional[int] = load_dataset('glue' , 'mrpc' ) def tokenize_function(__SCREAMING_SNAKE_CASE : Dict ): # max_length=None => use the model max length (it's actually the default) lowercase_ : Optional[int] = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase_ : Optional[int] = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['idx', 'sentence1', 'sentence2'] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase_ : Tuple = tokenized_datasets.rename_column('label' , 'labels' ) def collate_fn(__SCREAMING_SNAKE_CASE : Tuple ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase_ : str = 1_28 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase_ : Optional[Any] = 16 elif accelerator.mixed_precision != "no": lowercase_ : Any = 8 else: lowercase_ : List[Any] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding='longest' , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors='pt' , ) # Instantiate dataloaders. 
lowercase_ : List[Any] = DataLoader( tokenized_datasets['train'] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , drop_last=__SCREAMING_SNAKE_CASE ) lowercase_ : str = DataLoader( tokenized_datasets['validation'] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , drop_last=(accelerator.mixed_precision == 'fp8') , ) return train_dataloader, eval_dataloader def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Dict ): # Initialize accelerator lowercase_ : Tuple = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase_ : List[str] = config['lr'] lowercase_ : Union[str, Any] = int(config['num_epochs'] ) lowercase_ : Union[str, Any] = int(config['seed'] ) lowercase_ : List[str] = int(config['batch_size'] ) lowercase_ : Tuple = evaluate.load('glue' , 'mrpc' ) # If the batch size is too big we use gradient accumulation lowercase_ : int = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowercase_ : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE lowercase_ : Dict = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) lowercase_ , lowercase_ : Any = get_dataloaders(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase_ : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained('bert-base-cased' , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase_ : Dict = model.to(accelerator.device ) # Instantiate optimizer lowercase_ : Dict = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler lowercase_ : Union[str, Any] = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=1_00 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) lowercase_ : Tuple = model(**__SCREAMING_SNAKE_CASE ) lowercase_ : int = outputs.loss lowercase_ : List[str] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) with torch.no_grad(): lowercase_ : List[str] = model(**__SCREAMING_SNAKE_CASE ) lowercase_ : str = outputs.logits.argmax(dim=-1 ) lowercase_ , lowercase_ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['labels']) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) lowercase_ : List[str] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F'''epoch {epoch}:''' , __SCREAMING_SNAKE_CASE ) def lowercase__( ): lowercase_ : int = argparse.ArgumentParser(description='Simple example of training script.' ) parser.add_argument( '--mixed_precision' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose' 'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.' 'and an Nvidia Ampere GPU.' , ) parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' ) lowercase_ : Dict = parser.parse_args() lowercase_ : List[Any] = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
321
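# The collate_fn in the script above pads each batch to its longest sequence and
# then rounds that length up to a multiple of 8 under fp16/bf16 (16 under fp8),
# since tensor cores are fastest on aligned shapes. A rough sketch of just the
# rounding rule, independent of any tokenizer; the helper name padded_length is
# ours, not part of the script.
from typing import Optional


def padded_length(longest: int, pad_to_multiple_of: Optional[int]) -> int:
    if pad_to_multiple_of is None:
        return longest
    # ceil-divide, then scale back up to the nearest multiple
    return -(-longest // pad_to_multiple_of) * pad_to_multiple_of


assert padded_length(37, 8) == 40
assert padded_length(40, 8) == 40
assert padded_length(37, None) == 37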
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
1
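# The scraper above relies on a single XPath query: every <span> under a
# div.maincounter-number yields one text node, and the three matches unpack
# straight into the namedtuple. The same idea on a fixed HTML snippet, so it
# runs without network access; the snippet itself is made up for illustration.
from collections import namedtuple

from lxml import html

covid_data = namedtuple("covid_data", "cases deaths recovered")

page = """
<div class="maincounter-number"><span>1,000</span></div>
<div class="maincounter-number"><span>50</span></div>
<div class="maincounter-number"><span>900</span></div>
"""
xpath_str = '//div[@class = "maincounter-number"]/span/text()'
print(covid_data(*html.fromstring(page).xpath(xpath_str)))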
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less that current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
321
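# The index arithmetic in the heap above is the standard array encoding of a
# binary tree: node i has parent (i - 1) // 2 and children 2*i + 1 and 2*i + 2.
# A quick checker built on the same formulas, handy for sanity-checking the
# array after operations like decrease_key; the sample values are arbitrary.
def is_min_heap(values):
    # every child must be >= its parent under the array encoding
    for i in range(1, len(values)):
        parent = (i - 1) // 2
        if values[parent] > values[i]:
            return False
    return True


assert is_min_heap([-17, 1, 3, 6, 4])
assert not is_min_heap([5, 2, 9])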
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
1
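# The module above only flattens the data-processors namespace: downstream code
# imports InputExample and friends from one place instead of the submodules.
# A sketch of the plain-data container pattern those classes follow; the field
# names mirror the transformers utilities but are written out here as an
# assumption rather than imported.
from dataclasses import dataclass
from typing import Optional


@dataclass
class InputExample:
    # an id, one or two sentences, and an optional label
    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None


example = InputExample(guid="train-1", text_a="A man is eating.", text_b="A person eats.", label="entailment")
print(example)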
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFXLMRobertaModel @require_tf @require_sentencepiece @require_tokenizers class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Any = TFXLMRobertaModel.from_pretrained('jplu/tf-xlm-roberta-base' ) lowercase_ : List[Any] = { 'input_ids': tf.convert_to_tensor([[0, 2646, 1_0269, 83, 9_9942, 2]] ,dtype=tf.intaa ), # "My dog is cute" 'attention_mask': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] ,dtype=tf.intaa ), } lowercase_ : Optional[Any] = model(__UpperCamelCase )['last_hidden_state'] lowercase_ : int = tf.TensorShape((1, 6, 768) ) self.assertEqual(output.shape ,__UpperCamelCase ) # compare the actual values for a slice. lowercase_ : Optional[Any] = tf.convert_to_tensor( [ [ [0.068_1762, 0.1089_4451, 0.0677_2504], [-0.0642_3668, 0.0236_6615, 0.0432_9344], [-0.0605_7295, 0.0997_4135, -0.0007_0584], ] ] ,dtype=tf.floataa ,) self.assertTrue(np.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
321
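# The test above pins a pretrained checkpoint by comparing only a [:, :3, :3]
# corner of the hidden states against hard-coded values with an absolute
# tolerance, which is the usual way to verify weights without storing the full
# tensor. The core check reduced to numpy, with made-up values:
import numpy as np

output = np.array([[[0.0681762, 0.10894451], [-0.06423668, 0.02366615]]], dtype=np.float32)
expected = np.array([[[0.068176, 0.108945], [-0.064237, 0.023666]]], dtype=np.float32)

# atol=1e-4 tolerates float32 noise while still catching a wrong checkpoint
assert np.allclose(output, expected, atol=1e-4)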
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
1
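# create_position_ids_from_input_ids, exercised by the tests above, must skip
# padding: real tokens get consecutive positions starting at padding_idx + 1,
# while pad tokens keep padding_idx itself. The cumsum-over-mask formulation
# commonly used for this is sketched below; it reproduces the expected values
# in the test but is not necessarily the exact ESM source.
import torch


def create_position_ids(input_ids: torch.Tensor, padding_idx: int) -> torch.Tensor:
    mask = input_ids.ne(padding_idx).int()
    # positions count only non-pad tokens; pads are zeroed and land on padding_idx
    incremental = torch.cumsum(mask, dim=1) * mask
    return incremental.long() + padding_idx


ids = torch.tensor([[12, 31, 13, 1]])  # 1 is the pad id here
print(create_position_ids(ids, padding_idx=1))  # tensor([[2, 3, 4, 1]])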
"""simple docstring""" import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPTaConfig, LayoutLMConfig, LxmertConfig, OpenAIGPTConfig, RobertaConfig, TaConfig, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBartForSequenceClassification, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPTaLMHeadModel, TFLayoutLMForMaskedLM, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForCausalLM, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFTaForConditionalGeneration, TFTransfoXLLMHeadModel, TFWavaVecaModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, WavaVecaConfig, WavaVecaModel, XLMConfig, XLMRobertaConfig, XLNetConfig, is_torch_available, load_pytorch_checkpoint_in_tfa_model, ) from .utils import CONFIG_NAME, WEIGHTS_NAME, cached_file, logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPTaLMHeadModel, LayoutLMForMaskedLM, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, TaForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() __SCREAMING_SNAKE_CASE ={ "bart": ( BartConfig, TFBartForConditionalGeneration, TFBartForSequenceClassification, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), "bert": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-cased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-base-cased-finetuned-mrpc": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "dpr": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), "gpt2": ( GPTaConfig, TFGPTaLMHeadModel, GPTaLMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlnet": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm-roberta": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "transfo-xl": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "openai-gpt": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "roberta": ( RobertaConfig, TFRobertaForCausalLM, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "layoutlm": ( LayoutLMConfig, TFLayoutLMForMaskedLM, LayoutLMForMaskedLM, LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, ), "roberta-large-mnli": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "camembert": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "flaubert": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert-base-distilled-squad": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert-visual-feature-encoder": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), 
"ctrl": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "albert": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "t5": ( TaConfig, TFTaForConditionalGeneration, TaForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "electra": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "wav2vec2": ( WavaVecaConfig, TFWavaVecaModel, WavaVecaModel, WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : str=False , __SCREAMING_SNAKE_CASE : Tuple=True ): if model_type not in MODEL_CLASSES: raise ValueError(F'''Unrecognized model type, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase_ , lowercase_ , lowercase_ , lowercase_ : Tuple = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: lowercase_ : Optional[Any] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models ) lowercase_ : int = config_class.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Tuple = True lowercase_ : int = True print(F'''Building TensorFlow model from configuration: {config}''' ) lowercase_ : Union[str, Any] = model_class(__SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint if pytorch_checkpoint_path in aws_config_map.keys(): lowercase_ : Union[str, Any] = cached_file( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models ) # Load PyTorch checkpoint in tf2 model: lowercase_ : List[Any] = load_pytorch_checkpoint_in_tfa_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if compare_with_pt_model: lowercase_ : Optional[Any] = tf_model(tf_model.dummy_inputs , training=__SCREAMING_SNAKE_CASE ) # build the network lowercase_ : List[str] = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' ) lowercase_ : str = pt_model_class.from_pretrained( pretrained_model_name_or_path=__SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE , state_dict=__SCREAMING_SNAKE_CASE ) with torch.no_grad(): lowercase_ : List[Any] = pt_model(**pt_model.dummy_inputs ) lowercase_ : Optional[int] = pto[0].numpy() lowercase_ : Optional[Any] = tfo[0].numpy() lowercase_ : str = np.amax(np.abs(np_pt - np_tf ) ) print(F'''Max absolute difference between models outputs {diff}''' ) assert diff <= 2E-2, F'''Error, model absolute difference is >2e-2: {diff}''' # Save pytorch-model print(F'''Save TensorFlow model to {tf_dump_path}''' ) tf_model.save_weights(__SCREAMING_SNAKE_CASE , save_format='h5' ) def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str=None , __SCREAMING_SNAKE_CASE : Any=None , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : int=False , __SCREAMING_SNAKE_CASE : List[Any]=False , __SCREAMING_SNAKE_CASE : Dict=False , ): if args_model_type is None: lowercase_ : Dict = list(MODEL_CLASSES.keys() ) else: lowercase_ : Optional[int] = [args_model_type] for j, model_type in enumerate(__SCREAMING_SNAKE_CASE , start=1 ): print('=' * 1_00 ) print(F''' Converting model type {j}/{len(__SCREAMING_SNAKE_CASE )}: {model_type}''' ) print('=' * 1_00 ) if model_type not in MODEL_CLASSES: raise ValueError(F'''Unrecognized model type {model_type}, should be one of {list(MODEL_CLASSES.keys() )}.''' ) lowercase_ , 
lowercase_ , lowercase_ , lowercase_ , lowercase_ : List[Any] = MODEL_CLASSES[model_type] if model_shortcut_names_or_path is None: lowercase_ : str = list(aws_model_maps.keys() ) if config_shortcut_names_or_path is None: lowercase_ : Any = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start=1 ): print('-' * 1_00 ) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(F''' Skipping finetuned checkpoint {model_shortcut_name}''' ) continue lowercase_ : Any = model_shortcut_name elif only_convert_finetuned_models: print(F''' Skipping not finetuned checkpoint {model_shortcut_name}''' ) continue print( F''' Converting checkpoint {i}/{len(__SCREAMING_SNAKE_CASE )}: {model_shortcut_name} - model_type {model_type}''' ) print('-' * 1_00 ) if config_shortcut_name in aws_config_map: lowercase_ : Dict = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models ) else: lowercase_ : Optional[int] = config_shortcut_name if model_shortcut_name in aws_model_maps: lowercase_ : Optional[int] = cached_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , force_download=not use_cached_models ) else: lowercase_ : Any = model_shortcut_name if os.path.isfile(__SCREAMING_SNAKE_CASE ): lowercase_ : Union[str, Any] = 'converted_model' convert_pt_checkpoint_to_tf( model_type=__SCREAMING_SNAKE_CASE , pytorch_checkpoint_path=__SCREAMING_SNAKE_CASE , config_file=__SCREAMING_SNAKE_CASE , tf_dump_path=os.path.join(__SCREAMING_SNAKE_CASE , model_shortcut_name + '-tf_model.h5' ) , compare_with_pt_model=__SCREAMING_SNAKE_CASE , ) if remove_cached_files: os.remove(__SCREAMING_SNAKE_CASE ) os.remove(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_dump_path", default=None, type=str, required=True, help="Path to the output Tensorflow dump file." ) parser.add_argument( "--model_type", default=None, type=str, help=( F"Model type selected in the list of {list(MODEL_CLASSES.keys())}. If not given, will download and " "convert all the models from AWS." ), ) parser.add_argument( "--pytorch_checkpoint_path", default=None, type=str, help=( "Path to the PyTorch checkpoint path or shortcut name to download from AWS. " "If not given, will download and convert all the checkpoints from AWS." ), ) parser.add_argument( "--config_file", default=None, type=str, help=( "The config json file corresponding to the pre-trained model. \n" "This specifies the model architecture. If not given and " "--pytorch_checkpoint_path is not given or is a shortcut name " "use the configuration associated to the shortcut name on the AWS" ), ) parser.add_argument( "--compare_with_pt_model", action="store_true", help="Compare Tensorflow and PyTorch model predictions." 
) parser.add_argument( "--use_cached_models", action="store_true", help="Use cached models if possible instead of updating to latest checkpoint versions.", ) parser.add_argument( "--remove_cached_files", action="store_true", help="Remove pytorch models after conversion (save memory when converting in batches).", ) parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.") __SCREAMING_SNAKE_CASE =parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
321
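# The compare_with_pt_model path in the script above reduces the two framework
# outputs to a single scalar, the maximum absolute elementwise difference, and
# fails the conversion above 2e-2. The same check in isolation, with random
# arrays standing in for the PyTorch and TensorFlow logits:
import numpy as np

np_pt = np.random.rand(2, 5, 32).astype(np.float32)  # stand-in for PyTorch outputs
np_tf = np_pt + np.random.uniform(-1e-3, 1e-3, np_pt.shape).astype(np.float32)  # near-identical TF outputs

diff = np.amax(np.abs(np_pt - np_tf))
print(f"Max absolute difference between models outputs {diff}")
assert diff <= 2e-2, f"Error, model absolute difference is >2e-2: {diff}"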
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and save it as a list of matrices lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to one dimension lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Learning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # fully connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the summed error over all single images lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Complete---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
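# A minimal numpy sketch of the sliding-window convolution that `convolute` above
# implements: slide a kernel over the input, subtract a threshold, squash with a
# sigmoid. All names below are illustrative; the class above uses obfuscated ones.
import numpy as np

def sigmoid(x):
    return 1 / (1 + np.exp(-x))

def convolve_single_kernel(data, kernel, threshold, step=1):
    size_data, size_conv = data.shape[0], kernel.shape[0]
    size_map = int((size_data - size_conv) / step + 1)
    feature_map = np.zeros((size_map, size_map))
    for i in range(0, size_data - size_conv + 1, step):
        for j in range(0, size_data - size_conv + 1, step):
            window = data[i : i + size_conv, j : j + size_conv]
            feature_map[i // step, j // step] = sigmoid(np.sum(window * kernel) - threshold)
    return feature_map

# e.g. convolve_single_kernel(np.random.rand(5, 5), np.random.rand(2, 2), threshold=0.5)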
321
1
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = KandinskyVaaControlnetPipeline lowercase = ['image_embeds', 'negative_image_embeds', 'hint'] lowercase = ['image_embeds', 'negative_image_embeds', 'hint'] lowercase = [ 'generator', 'height', 'width', 'latents', 'guidance_scale', 'num_inference_steps', 'return_dict', 'guidance_scale', 'num_images_per_prompt', 'output_type', 'return_dict', ] lowercase = False @property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return 32 @property def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return 32 @property def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' return self.time_input_dim @property def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return self.time_input_dim * 4 @property def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return 100 @property def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' torch.manual_seed(0 ) lowercase_ : str = { 'in_channels': 8, # Out channels is double in channels because predicts mean and variance 'out_channels': 8, 'addition_embed_type': 'image_hint', 'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'), 'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'), 'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn', 'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2), 'layers_per_block': 1, 'encoder_hid_dim': self.text_embedder_hidden_size, 'encoder_hid_dim_type': 'image_proj', 'cross_attention_dim': self.cross_attention_dim, 'attention_head_dim': 4, 'resnet_time_scale_shift': 'scale_shift', 'class_embed_type': None, } lowercase_ : Any = UNetaDConditionModel(**__UpperCamelCase ) return model @property def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return { "block_out_channels": [32, 32, 64, 64], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 12, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' torch.manual_seed(0 ) lowercase_ : int = VQModel(**self.dummy_movq_kwargs ) return model def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Optional[Any] = self.dummy_unet lowercase_ : Union[str, Any] = self.dummy_movq lowercase_ : List[Any] = DDIMScheduler( num_train_timesteps=1000 ,beta_schedule='linear' ,beta_start=0.0_0085 ,beta_end=0.012 ,clip_sample=__UpperCamelCase ,set_alpha_to_one=__UpperCamelCase ,steps_offset=1 ,prediction_type='epsilon' ,thresholding=__UpperCamelCase ,) lowercase_ : str = { 'unet': unet, 'scheduler': scheduler, 'movq': movq, } return components def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=0 ) -> 
List[str]: '''simple docstring''' lowercase_ : Tuple = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) lowercase_ : List[str] = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to( __UpperCamelCase ) # create hint lowercase_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(__UpperCamelCase ) ).to(__UpperCamelCase ) if str(__UpperCamelCase ).startswith('mps' ): lowercase_ : Union[str, Any] = torch.manual_seed(__UpperCamelCase ) else: lowercase_ : Any = torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase ) lowercase_ : Optional[Any] = { 'image_embeds': image_embeds, 'negative_image_embeds': negative_image_embeds, 'hint': hint, 'generator': generator, 'height': 64, 'width': 64, 'guidance_scale': 4.0, 'num_inference_steps': 2, 'output_type': 'np', } return inputs def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = 'cpu' lowercase_ : List[Any] = self.get_dummy_components() lowercase_ : Optional[Any] = self.pipeline_class(**__UpperCamelCase ) lowercase_ : List[Any] = pipe.to(__UpperCamelCase ) pipe.set_progress_bar_config(disable=__UpperCamelCase ) lowercase_ : str = pipe(**self.get_dummy_inputs(__UpperCamelCase ) ) lowercase_ : Optional[int] = output.images lowercase_ : Optional[Any] = pipe( **self.get_dummy_inputs(__UpperCamelCase ) ,return_dict=__UpperCamelCase ,)[0] lowercase_ : Any = image[0, -3:, -3:, -1] lowercase_ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) lowercase_ : str = np.array( [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}''' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}''' @slow @require_torch_gpu class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> int: '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' ) lowercase_ : List[Any] = load_image( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinskyv22/hint_image_cat.png' ) lowercase_ : Tuple = torch.from_numpy(np.array(__UpperCamelCase ) ).float() / 255.0 lowercase_ : Optional[int] = hint.permute(2 ,0 ,1 ).unsqueeze(0 ) lowercase_ : Tuple = KandinskyVaaPriorPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-prior' ,torch_dtype=torch.floataa ) pipe_prior.to(__UpperCamelCase ) lowercase_ : Optional[int] = KandinskyVaaControlnetPipeline.from_pretrained( 'kandinsky-community/kandinsky-2-2-controlnet-depth' ,torch_dtype=torch.floataa ) lowercase_ : List[str] = pipeline.to(__UpperCamelCase ) pipeline.set_progress_bar_config(disable=__UpperCamelCase ) lowercase_ : Optional[int] = 'A robot, 4k photo' lowercase_ : str = torch.Generator(device='cuda' ).manual_seed(0 ) lowercase_ , lowercase_ : Union[str, Any] = pipe_prior( __UpperCamelCase ,generator=__UpperCamelCase ,num_inference_steps=5 ,negative_prompt='' ,).to_tuple() lowercase_ : Dict = 
torch.Generator(device='cuda' ).manual_seed(0 ) lowercase_ : Optional[int] = pipeline( image_embeds=__UpperCamelCase ,negative_image_embeds=__UpperCamelCase ,hint=__UpperCamelCase ,generator=__UpperCamelCase ,num_inference_steps=100 ,output_type='np' ,) lowercase_ : str = output.images[0] assert image.shape == (512, 512, 3) assert_mean_pixel_difference(__UpperCamelCase ,__UpperCamelCase )
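# A hedged sketch of the two-stage flow the slow test above exercises, written with
# the public diffusers class names (KandinskyV22PriorPipeline /
# KandinskyV22ControlnetPipeline) that the obfuscated names here correspond to; the
# random `hint` tensor stands in for a real depth map.
import torch
from diffusers import KandinskyV22ControlnetPipeline, KandinskyV22PriorPipeline

pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
).to("cuda")
pipe = KandinskyV22ControlnetPipeline.from_pretrained(
    "kandinsky-community/kandinsky-2-2-controlnet-depth", torch_dtype=torch.float16
).to("cuda")

image_embeds, negative_image_embeds = pipe_prior("A robot, 4k photo").to_tuple()
hint = torch.rand(1, 3, 512, 512, dtype=torch.float16, device="cuda")  # (1, 3, H, W) in [0, 1]
image = pipe(
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
    hint=hint,
    num_inference_steps=50,
).images[0]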
321
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
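# A standalone sketch of the benchmark API these tests exercise; the tiny model id
# and the batch/sequence sizes mirror the values used above and are illustrative.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = TensorFlowBenchmark(args).run()
# nested as model -> batch size -> sequence length, cf. the helper at the top of this class
print(results.time_inference_result)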
321
1
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
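# A small sketch of the special-token rule defined above: `build_inputs_with_special_tokens`
# returns `token_ids_0 + [eos]`, i.e. a Blenderbot sequence ends in a single </s>.
# The checkpoint name is taken from the pretrained map above.
from transformers import BlenderbotTokenizerFast

tok = BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B")
plain_ids = tok(" Hello, how are you?", add_special_tokens=False).input_ids
ids = tok.build_inputs_with_special_tokens(plain_ids)
assert ids[-1] == tok.eos_token_id  # a single trailing </s>, nothing prepended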
321
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
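# The chunking arithmetic implied by the two properties above, spelled out with
# illustrative numbers (24 kHz audio, 1-second chunks, 1% overlap).
sampling_rate = 24_000
chunk_length_s = 1.0
overlap = 0.01

chunk_length = int(chunk_length_s * sampling_rate)           # 24000 samples per chunk
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))   # 23760 samples between chunk starts
# truncation cuts a batch down to a whole number of strides (floor); padding rounds up (ceil)
max_length = 100_000
nb_step = int(max_length // chunk_stride)                    # 4
print((nb_step - 1) * chunk_stride + chunk_length)           # 95280 -- the truncated length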
321
1
"""simple docstring""" import json import os import tempfile import unittest import numpy as np from datasets import load_dataset from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import ImageGPTImageProcessor class UpperCamelCase ( unittest.TestCase ): def __init__( self ,__UpperCamelCase ,__UpperCamelCase=7 ,__UpperCamelCase=3 ,__UpperCamelCase=18 ,__UpperCamelCase=30 ,__UpperCamelCase=400 ,__UpperCamelCase=True ,__UpperCamelCase=None ,__UpperCamelCase=True ,) -> int: '''simple docstring''' lowercase_ : Any = size if size is not None else {'height': 18, 'width': 18} lowercase_ : Optional[Any] = parent lowercase_ : Any = batch_size lowercase_ : List[str] = num_channels lowercase_ : int = image_size lowercase_ : Tuple = min_resolution lowercase_ : Dict = max_resolution lowercase_ : List[str] = do_resize lowercase_ : str = size lowercase_ : List[str] = do_normalize def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return { # here we create 2 clusters for the sake of simplicity "clusters": np.asarray( [ [0.8866_4436_3403_3203, 0.6618_8293_6954_4983, 0.3891_7464_0178_6804], [-0.6042_5591_4688_1104, -0.0_2295_0088_6052_8469, 0.5423_7973_6900_3296], ] ), "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, } @require_torch @require_vision class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = ImageGPTImageProcessor if is_vision_available() else None def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = ImageGPTImageProcessingTester(self ) @property def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(__UpperCamelCase ,'clusters' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_resize' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'size' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'do_normalize' ) ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} ) lowercase_ : List[str] = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ) self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[str] = self.image_processing_class(**self.image_processor_dict ) lowercase_ : Tuple = json.loads(image_processor.to_json_string() ) for key, value in self.image_processor_dict.items(): if key == "clusters": self.assertTrue(np.array_equal(__UpperCamelCase ,obj[key] ) ) else: self.assertEqual(obj[key] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : List[Any] = os.path.join(__UpperCamelCase ,'image_processor.json' ) image_processor_first.to_json_file(__UpperCamelCase ) lowercase_ : str = self.image_processing_class.from_json_file(__UpperCamelCase 
).to_dict() lowercase_ : str = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(__UpperCamelCase ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) with tempfile.TemporaryDirectory() as tmpdirname: image_processor_first.save_pretrained(__UpperCamelCase ) lowercase_ : Union[str, Any] = self.image_processing_class.from_pretrained(__UpperCamelCase ).to_dict() lowercase_ : List[str] = image_processor_first.to_dict() for key, value in image_processor_first.items(): if key == "clusters": self.assertTrue(np.array_equal(__UpperCamelCase ,image_processor_second[key] ) ) else: self.assertEqual(image_processor_first[key] ,__UpperCamelCase ) @unittest.skip('ImageGPT requires clusters at initialization' ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' pass def lowercase__( ): lowercase_ : Optional[Any] = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' ) lowercase_ : str = Image.open(dataset[4]['file'] ) lowercase_ : str = Image.open(dataset[5]['file'] ) lowercase_ : Dict = [imagea, imagea] return images @require_vision @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Optional[Any] = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' ) lowercase_ : Any = prepare_images() # test non-batched lowercase_ : Tuple = image_processing(images[0] ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(1, 1024) ) lowercase_ : Tuple = [306, 191, 191] self.assertEqual(encoding.input_ids[0, :3].tolist() ,__UpperCamelCase ) # test batched lowercase_ : Tuple = image_processing(__UpperCamelCase ,return_tensors='pt' ) self.assertIsInstance(encoding.input_ids ,torch.LongTensor ) self.assertEqual(encoding.input_ids.shape ,(2, 1024) ) lowercase_ : List[Any] = [303, 13, 13] self.assertEqual(encoding.input_ids[1, -3:].tolist() ,__UpperCamelCase )
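# A numpy sketch of the colour quantisation that ImageGPTImageProcessor performs with
# its `clusters`: each normalised pixel is mapped to the id of its nearest cluster
# centre. The two centres below reuse the toy clusters from the tester above.
import numpy as np

clusters = np.array(
    [
        [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
        [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
    ]
)
pixels = np.random.uniform(-1.0, 1.0, size=(1024, 3))  # a flattened 32x32 RGB image in [-1, 1]

# squared euclidean distance of every pixel to every cluster centre, then argmin
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = distances.argmin(axis=1)  # one token id per pixel, as in `encoding.input_ids`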
321
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
1
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int=0.999 , __SCREAMING_SNAKE_CASE : List[Any]="cosine" , ): if alpha_transform_type == "cosine": def alpha_bar_fn(__SCREAMING_SNAKE_CASE : Union[str, Any] ): return math.cos((t + 0.008) / 1.008 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__SCREAMING_SNAKE_CASE : List[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(F'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) lowercase_ : Any = [] for i in range(__SCREAMING_SNAKE_CASE ): lowercase_ : int = i / num_diffusion_timesteps lowercase_ : Optional[int] = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__SCREAMING_SNAKE_CASE ) / alpha_bar_fn(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) ) return torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.floataa ) class UpperCamelCase ( lowercase_ , lowercase_ ): lowercase = [e.name for e in KarrasDiffusionSchedulers] lowercase = 2 @register_to_config def __init__( self ,__UpperCamelCase = 1000 ,__UpperCamelCase = 0.0_0085 ,__UpperCamelCase = 0.012 ,__UpperCamelCase = "linear" ,__UpperCamelCase = None ,__UpperCamelCase = "epsilon" ,__UpperCamelCase = "linspace" ,__UpperCamelCase = 0 ,) -> Tuple: '''simple docstring''' if trained_betas is not None: lowercase_ : Union[str, Any] = torch.tensor(__UpperCamelCase ,dtype=torch.floataa ) elif beta_schedule == "linear": lowercase_ : Optional[int] = torch.linspace(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. lowercase_ : Any = ( torch.linspace(beta_start**0.5 ,beta_end**0.5 ,__UpperCamelCase ,dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule lowercase_ : Union[str, Any] = betas_for_alpha_bar(__UpperCamelCase ) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' ) lowercase_ : Tuple = 1.0 - self.betas lowercase_ : Any = torch.cumprod(self.alphas ,dim=0 ) # set all values self.set_timesteps(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> List[Any]: '''simple docstring''' if schedule_timesteps is None: lowercase_ : Any = self.timesteps lowercase_ : Optional[Any] = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. 
for image-to-image) if len(self._index_counter ) == 0: lowercase_ : str = 1 if len(__UpperCamelCase ) > 1 else 0 else: lowercase_ : List[str] = timestep.cpu().item() if torch.is_tensor(__UpperCamelCase ) else timestep lowercase_ : int = self._index_counter[timestep_int] return indices[pos].item() @property def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,) -> torch.FloatTensor: '''simple docstring''' lowercase_ : List[str] = self.index_for_timestep(__UpperCamelCase ) if self.state_in_first_order: lowercase_ : List[str] = self.sigmas[step_index] else: lowercase_ : Dict = self.sigmas_interpol[step_index] lowercase_ : Dict = sample / ((sigma**2 + 1) ** 0.5) return sample def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> List[Any]: '''simple docstring''' lowercase_ : Dict = num_inference_steps lowercase_ : Dict = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": lowercase_ : Union[str, Any] = np.linspace(0 ,num_train_timesteps - 1 ,__UpperCamelCase ,dtype=__UpperCamelCase )[::-1].copy() elif self.config.timestep_spacing == "leading": lowercase_ : Any = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase_ : List[Any] = (np.arange(0 ,__UpperCamelCase ) * step_ratio).round()[::-1].copy().astype(__UpperCamelCase ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": lowercase_ : Union[str, Any] = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 lowercase_ : str = (np.arange(__UpperCamelCase ,0 ,-step_ratio )).round().copy().astype(__UpperCamelCase ) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. 
Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) lowercase_ : Tuple = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) lowercase_ : int = torch.from_numpy(np.log(__UpperCamelCase ) ).to(__UpperCamelCase ) lowercase_ : List[Any] = np.interp(__UpperCamelCase ,np.arange(0 ,len(__UpperCamelCase ) ) ,__UpperCamelCase ) lowercase_ : Any = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) lowercase_ : Optional[Any] = torch.from_numpy(__UpperCamelCase ).to(device=__UpperCamelCase ) # interpolate sigmas lowercase_ : Dict = sigmas.log().lerp(sigmas.roll(1 ).log() ,0.5 ).exp() lowercase_ : str = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) lowercase_ : str = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(__UpperCamelCase ).startswith('mps' ): # mps does not support float64 lowercase_ : Optional[int] = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase ,dtype=torch.floataa ) else: lowercase_ : Optional[Any] = torch.from_numpy(__UpperCamelCase ).to(__UpperCamelCase ) # interpolate timesteps lowercase_ : Dict = self.sigma_to_t(__UpperCamelCase ).to(__UpperCamelCase ,dtype=timesteps.dtype ) lowercase_ : int = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) ,dim=-1 ).flatten() lowercase_ : Dict = torch.cat([timesteps[:1], interleaved_timesteps] ) lowercase_ : Tuple = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter lowercase_ : Tuple = defaultdict(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = sigma.log() # get distribution lowercase_ : Tuple = log_sigma - self.log_sigmas[:, None] # get sigmas range lowercase_ : Any = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) lowercase_ : Optional[Any] = low_idx + 1 lowercase_ : Optional[Any] = self.log_sigmas[low_idx] lowercase_ : List[Any] = self.log_sigmas[high_idx] # interpolate sigmas lowercase_ : int = (low - log_sigma) / (low - high) lowercase_ : Dict = w.clamp(0 ,1 ) # transform interpolation to time range lowercase_ : int = (1 - w) * low_idx + w * high_idx lowercase_ : int = t.view(sigma.shape ) return t @property def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return self.sample is None def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = True ,) -> Union[SchedulerOutput, Tuple]: '''simple docstring''' lowercase_ : List[Any] = self.index_for_timestep(__UpperCamelCase ) # advance index counter by 1 lowercase_ : Dict = timestep.cpu().item() if torch.is_tensor(__UpperCamelCase ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: lowercase_ : List[Any] = self.sigmas[step_index] lowercase_ : int = self.sigmas_interpol[step_index + 1] lowercase_ : Optional[Any] = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method lowercase_ : Union[str, Any] = self.sigmas[step_index - 1] lowercase_ : Any = self.sigmas_interpol[step_index] lowercase_ : List[str] = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API lowercase_ : str = 0 lowercase_ : Any = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": lowercase_ : Tuple = sigma_hat if self.state_in_first_order else sigma_interpol lowercase_ : Any = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": lowercase_ : Dict = sigma_hat if self.state_in_first_order else sigma_interpol lowercase_ : Optional[Any] = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('prediction_type not implemented yet: sample' ) else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order lowercase_ : Any = (sample - pred_original_sample) / sigma_hat # 3. delta timestep lowercase_ : str = sigma_interpol - sigma_hat # store for 2nd order step lowercase_ : Any = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order lowercase_ : Union[str, Any] = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep lowercase_ : int = sigma_next - sigma_hat lowercase_ : int = self.sample lowercase_ : Any = None lowercase_ : Optional[int] = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) -> torch.FloatTensor: '''simple docstring''' lowercase_ : List[str] = self.sigmas.to(device=original_samples.device ,dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(__UpperCamelCase ): # mps does not support float64 lowercase_ : List[str] = self.timesteps.to(original_samples.device ,dtype=torch.floataa ) lowercase_ : Tuple = timesteps.to(original_samples.device ,dtype=torch.floataa ) else: lowercase_ : Optional[Any] = self.timesteps.to(original_samples.device ) lowercase_ : Tuple = timesteps.to(original_samples.device ) lowercase_ : Any = [self.index_for_timestep(__UpperCamelCase ,__UpperCamelCase ) for t in timesteps] lowercase_ : Tuple = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): lowercase_ : List[str] = sigma.unsqueeze(-1 ) lowercase_ : Dict = original_samples + noise * sigma return noisy_samples def __len__( self ) -> Optional[Any]: '''simple docstring''' return self.config.num_train_timesteps
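# A tiny torch demo of the log-space interpolation used above to build
# `sigmas_interpol`: each interpolated sigma is the geometric mean of two
# neighbouring sigmas. The values are illustrative.
import torch

sigmas = torch.tensor([14.6, 8.0, 4.0, 2.0, 1.0])
sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()
# equivalent to sqrt(sigma_i * sigma_{i-1}); the first entry wraps around to the
# last sigma and is discarded when the scheduler concatenates its final tensors
print(sigmas_interpol)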
321
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
1
"""simple docstring""" from google.protobuf import descriptor as _descriptor from google.protobuf import descriptor_pool as _descriptor_pool from google.protobuf import symbol_database as _symbol_database from google.protobuf.internal import builder as _builder # @@protoc_insertion_point(imports) __SCREAMING_SNAKE_CASE =_symbol_database.Default() __SCREAMING_SNAKE_CASE =_descriptor_pool.Default().AddSerializedFile( B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03" ) __SCREAMING_SNAKE_CASE =globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) _builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals) if _descriptor._USE_C_DESCRIPTORS is False: __SCREAMING_SNAKE_CASE =None __SCREAMING_SNAKE_CASE =B"H\003" # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined) # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001" # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001" __SCREAMING_SNAKE_CASE =45 __SCREAMING_SNAKE_CASE =1581 __SCREAMING_SNAKE_CASE =1517 __SCREAMING_SNAKE_CASE =1570 __SCREAMING_SNAKE_CASE =1584 __SCREAMING_SNAKE_CASE =1793 __SCREAMING_SNAKE_CASE =1795 __SCREAMING_SNAKE_CASE =1916 __SCREAMING_SNAKE_CASE =1864 __SCREAMING_SNAKE_CASE =1905 __SCREAMING_SNAKE_CASE =1919 __SCREAMING_SNAKE_CASE =2429 __SCREAMING_SNAKE_CASE =2208 __SCREAMING_SNAKE_CASE =2418 __SCREAMING_SNAKE_CASE =2323 __SCREAMING_SNAKE_CASE =2407 # @@protoc_insertion_point(module_scope)
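# A hedged sketch of reading a trained SentencePiece model with the generated
# protobuf module above, assuming it is importable (in transformers it lives at
# transformers.utils.sentencepiece_model_pb2); the .model path is illustrative.
from transformers.utils import sentencepiece_model_pb2

m = sentencepiece_model_pb2.ModelProto()
with open("tokenizer.model", "rb") as f:
    m.ParseFromString(f.read())
# model type (UNIGRAM/BPE/WORD/CHAR), vocabulary size, and the first piece
print(m.trainer_spec.model_type, len(m.pieces), m.pieces[0].piece)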
321
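The block above is the protobuf-generated Python binding for sentencepiece's `sentencepiece_model.proto`. As a hedged sketch of how such a generated module is typically consumed, assuming it is importable as `sentencepiece_model_pb2` and that a trained model file exists on disk (the path `spiece.model` is a placeholder):

from sentencepiece_model_pb2 import ModelProto  # the generated module above

proto = ModelProto()
with open("spiece.model", "rb") as f:  # placeholder path, an assumption
    proto.ParseFromString(f.read())

# Inspect fields declared by TrainerSpec and ModelProto.SentencePiece
print(proto.trainer_spec.vocab_size)
print(proto.pieces[0].piece, proto.pieces[0].score)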
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
321
1
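Once a DiT checkpoint has been converted by a script like the one above, inference follows the standard BEiT image-classification path. A minimal sketch, assuming the converted weights were pushed to the Hub under the name the `push_to_hub` branch above would produce (`nielsr/dit-base-finetuned-rvlcdip`) and that a local document scan `document.png` exists:

import torch
from PIL import Image
from transformers import BeitImageProcessor, BeitForImageClassification

repo = "nielsr/dit-base-finetuned-rvlcdip"  # follows the push_to_hub naming above
processor = BeitImageProcessor.from_pretrained(repo)
model = BeitForImageClassification.from_pretrained(repo)

image = Image.open("document.png").convert("RGB")  # placeholder input image
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])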
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase__( __SCREAMING_SNAKE_CASE : np.ndarray ): lowercase_ , lowercase_ , lowercase_ : Dict = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def lowercase__( __SCREAMING_SNAKE_CASE : np.ndarray ): return (gray > 1_27) & (gray <= 2_55) def lowercase__( __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray ): lowercase_ : Tuple = np.zeros_like(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image lowercase_ : Optional[Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): lowercase_ : List[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() lowercase_ : Union[str, Any] = int(summation > 0 ) return output if __name__ == "__main__": # read original image __SCREAMING_SNAKE_CASE =Path(__file__).resolve().parent / "image_data" / "lena.jpg" __SCREAMING_SNAKE_CASE =np.array(Image.open(lena_path)) # kernel to be applied __SCREAMING_SNAKE_CASE =np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __SCREAMING_SNAKE_CASE =dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __SCREAMING_SNAKE_CASE =Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
321
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
1
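To make the cipher in the second snippet concrete: each letter maps to a five-symbol A/B code, so encoding and decoding reduce to dictionary lookups in each direction. A self-contained round-trip sketch (helper names are mine; the table mirrors `encode_dict` above):

BACON = {
    "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA",
    "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA",
    "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB",
    "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA",
    "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA",
    "z": "BABBB", " ": " ",
}
REVERSE = {v: k for k, v in BACON.items()}

def encode(text: str) -> str:
    return "".join(BACON[ch] for ch in text.lower())

def decode(coded: str) -> str:
    words = []
    for word in coded.split():  # spaces survive encoding, so split on them
        words.append("".join(REVERSE[word[i : i + 5]] for i in range(0, len(word), 5)))
    return " ".join(words)

message = "hello world"
assert decode(encode(message)) == message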
"""simple docstring""" from sklearn.metrics import fa_score import datasets __SCREAMING_SNAKE_CASE ="\nThe F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:\nF1 = 2 * (precision * recall) / (precision + recall)\n" __SCREAMING_SNAKE_CASE ="\nArgs:\n predictions (`list` of `int`): Predicted labels.\n references (`list` of `int`): Ground truth labels.\n labels (`list` of `int`): The set of labels to include when `average` is not set to `'binary'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.\n pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.\n average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.\n\n - 'binary': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.\n - 'micro': Calculate metrics globally by counting the total true positives, false negatives and false positives.\n - 'macro': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - 'weighted': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.\n - 'samples': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n sample_weight (`list` of `float`): Sample weights Defaults to None.\n\nReturns:\n f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. 
Higher f1 scores are better.\n\nExamples:\n\n Example 1-A simple binary example\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])\n >>> print(results)\n {'f1': 0.5}\n\n Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)\n >>> print(round(results['f1'], 2))\n 0.67\n\n Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.\n >>> f1_metric = datasets.load_metric(\"f1\")\n >>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])\n >>> print(round(results['f1'], 2))\n 0.35\n\n Example 4-A multiclass example, with different values for the `average` input.\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"macro\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"micro\")\n >>> print(round(results['f1'], 2))\n 0.33\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=\"weighted\")\n >>> print(round(results['f1'], 2))\n 0.27\n >>> results = f1_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {'f1': array([0.8, 0. , 0. ])}\n" __SCREAMING_SNAKE_CASE ="\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCamelCase ( datasets.Metric ): def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { 'predictions': datasets.Sequence(datasets.Value('int32' ) ), 'references': datasets.Sequence(datasets.Value('int32' ) ), } if self.config_name == 'multilabel' else { 'predictions': datasets.Value('int32' ), 'references': datasets.Value('int32' ), } ) ,reference_urls=['https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'] ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=1 ,__UpperCamelCase="binary" ,__UpperCamelCase=None ) -> Any: '''simple docstring''' lowercase_ : List[str] = fa_score( __UpperCamelCase ,__UpperCamelCase ,labels=__UpperCamelCase ,pos_label=__UpperCamelCase ,average=__UpperCamelCase ,sample_weight=__UpperCamelCase ) return {"f1": float(__UpperCamelCase ) if score.size == 1 else score}
321
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
1
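The bottom-up variant at the end of that snippet fills `dp_array[t]` with the number of ordered sequences from `array` summing to `t`, seeding `dp_array[0] = 1`. A compact, self-contained restatement with a sanity check (the function name is mine):

def combination_sum_iv(nums: list[int], target: int) -> int:
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: take nothing
    for t in range(1, target + 1):
        for n in nums:
            if t - n >= 0:
                dp[t] += dp[t - n]
    return dp[target]

print(combination_sum_iv([1, 2, 5], 5))  # 9 ordered combinations, matching the recursive versions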
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): __SCREAMING_SNAKE_CASE =get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right __SCREAMING_SNAKE_CASE =12_8022 __SCREAMING_SNAKE_CASE =12_8028 @require_sentencepiece class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = MaMaaaTokenizer lowercase = False lowercase = False lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' super().setUp() lowercase_ : str = ['</s>', '<unk>', '▁This', '▁is', '▁a', '▁t', 'est', '\u0120', '<pad>'] lowercase_ : str = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Tuple = Path(self.tmpdirname ) save_json(__UpperCamelCase ,save_dir / VOCAB_FILES_NAMES['vocab_file'] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(__UpperCamelCase ,save_dir / VOCAB_FILES_NAMES['spm_file'] ) lowercase_ : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> List[str]: '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return ( "This is a test", "This is a test", ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : List[Any] = '</s>' lowercase_ : Optional[int] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] ,'</s>' ) self.assertEqual(vocab_keys[1] ,'<unk>' ) self.assertEqual(vocab_keys[-1] ,'<s>' ) self.assertEqual(len(__UpperCamelCase ) ,tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip('Skip this test while all models are still to be uploaded.' 
) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : int = self.get_tokenizer() lowercase_ : int = tokenizer.tokenize('This is a test' ) self.assertListEqual(__UpperCamelCase ,['▁This', '▁is', '▁a', '▁t', 'est'] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,[2, 3, 4, 5, 6] ,) lowercase_ : Tuple = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(__UpperCamelCase ,['▁This', '▁is', '▁a', '▁t', 'est'] ) lowercase_ : Any = tokenizer.convert_tokens_to_string(__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,'This is a test' ) @slow def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = {'input_ids': [[12_8022, 11_0108, 397, 11, 3_8272, 2247, 12_4811, 285, 1_8105, 1586, 207, 7, 3_9534, 4428, 397, 1019, 1_8105, 1586, 207, 7, 4_1337, 1_6786, 241, 7, 2_0214, 17, 12_5690, 1_0398, 7, 4_4378, 5_8069, 6_8342, 7798, 7343, 11, 299, 3_3310, 4, 158, 3_7350, 9_4077, 4569, 299, 3_3310, 90, 4, 5_2840, 290, 4, 3_1270, 112, 299, 682, 4, 5_2840, 3_9953, 1_4079, 193, 5_2519, 9_0894, 1_7894, 12_0697, 11, 4_0445, 551, 17, 1019, 5_2519, 9_0894, 1_7756, 963, 11, 4_0445, 480, 17, 9792, 1120, 5173, 1393, 6240, 1_6786, 241, 12_0996, 28, 1245, 1393, 11_8240, 1_1123, 1019, 9_3612, 2691, 1_0618, 9_8058, 12_0409, 1928, 279, 4, 4_0683, 367, 178, 207, 1019, 103, 10_3121, 506, 6_5296, 5, 2], [12_8022, 2_1217, 367, 117, 12_5450, 128, 719, 7, 7308, 40, 9_3612, 1_2669, 1116, 1_6704, 71, 1_7785, 3699, 1_5592, 35, 144, 9584, 241, 1_1943, 713, 950, 799, 2247, 8_8427, 150, 149, 11_8813, 12_0706, 1019, 10_6906, 8_1518, 28, 1224, 2_2799, 397, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [12_8022, 1658, 12_3311, 5155, 5578, 4722, 279, 1_4947, 2366, 1120, 1197, 14, 1348, 9232, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=__UpperCamelCase ,model_name='facebook/m2m100_418M' ,revision='c168bae485c864188cf9aa0e4108b0b6934dc91e' ,) @require_torch @require_sentencepiece @require_tokenizers class UpperCamelCase ( 
unittest.TestCase ): lowercase = 'facebook/m2m100_418M' lowercase = [ 'In my opinion, there are two levels of response from the French government.', 'NSA Affair Emphasizes Complete Lack of Debate on Intelligence', ] lowercase = [ 'Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.', 'L\'affaire NSA souligne l\'absence totale de débat sur le renseignement', ] # fmt: off lowercase = [EN_CODE, 5_9_3, 1_9_4_9, 1_1_5_7_8_1, 4, 7_1_5_8_6, 4_2_3_4, 6_0_6_3_3, 1_2_6_2_3_3, 4_3_2, 1_2_3_8_0_8, 1_5_5_9_2, 1_1_9_7, 1_1_7_1_3_2, 1_2_0_6_1_8, 5, 2] @classmethod def _UpperCAmelCase ( cls ) -> Dict: '''simple docstring''' lowercase_ : MaMaaaTokenizer = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name ,src_lang='en' ,tgt_lang='fr' ) lowercase_ : Tuple = 1 return cls def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id('ar' ) ,12_8006 ) self.assertEqual(self.tokenizer.get_lang_id('en' ) ,12_8022 ) self.assertEqual(self.tokenizer.get_lang_id('ro' ) ,12_8076 ) self.assertEqual(self.tokenizer.get_lang_id('mr' ) ,12_8063 ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = self.tokenizer.get_vocab() self.assertEqual(len(__UpperCamelCase ) ,self.tokenizer.vocab_size ) self.assertEqual(vocab['<unk>'] ,3 ) self.assertIn(self.tokenizer.get_lang_token('en' ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : str = 'en' lowercase_ : List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' self.assertIn(__UpperCamelCase ,self.tokenizer.all_special_ids ) # fmt: off lowercase_ : List[Any] = [FR_CODE, 5364, 82, 8642, 4, 294, 47, 8, 1_4028, 136, 3286, 9706, 6, 9_0797, 6, 14_4012, 162, 8_8128, 3_0061, 5, 2] # fmt: on lowercase_ : Dict = self.tokenizer.decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase ) lowercase_ : str = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) self.assertNotIn(self.tokenizer.eos_token ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = tempfile.mkdtemp() lowercase_ : str = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = MaMaaaTokenizer.from_pretrained(__UpperCamelCase ) self.assertDictEqual(new_tok.lang_token_to_id ,__UpperCamelCase ) @require_torch def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'en' lowercase_ : Any = 'fr' lowercase_ : Optional[int] = self.tokenizer(self.src_text ,text_target=self.tgt_text ,padding=__UpperCamelCase ,return_tensors='pt' ) lowercase_ : Optional[Any] = shift_tokens_right( batch['labels'] ,self.tokenizer.pad_token_id ,self.tokenizer.eos_token_id ) for k in batch: lowercase_ : List[str] = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = 'mr' 
self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) lowercase_ : int = 'zh' self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[Any] = 'mr' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('mr' )] ) self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) lowercase_ : Union[str, Any] = 'zh' self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id('zh' )] ) self.assertListEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens ,[self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = self.tokenizer._build_translation_inputs('A test' ,return_tensors='pt' ,src_lang='en' ,tgt_lang='ar' ) self.assertEqual( nested_simplify(__UpperCamelCase ) ,{ # en_XX, A, test, EOS 'input_ids': [[12_8022, 58, 4183, 2]], 'attention_mask': [[1, 1, 1, 1]], # ar_AR 'forced_bos_token_id': 12_8006, } ,)
321
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
1
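The second snippet in that row is a disjoint-set union that merges by rank and also tracks the largest set size. A stripped-down sketch of the same idea, using union by size and path halving in `find` (names are mine; the max-set bookkeeping is omitted):

class DSU:
    def __init__(self, n: int) -> None:
        self.parent = list(range(n))
        self.size = [1] * n

    def find(self, x: int) -> int:
        while self.parent[x] != x:
            self.parent[x] = self.parent[self.parent[x]]  # path halving
            x = self.parent[x]
        return x

    def union(self, a: int, b: int) -> bool:
        ra, rb = self.find(a), self.find(b)
        if ra == rb:
            return False
        if self.size[ra] < self.size[rb]:
            ra, rb = rb, ra
        self.parent[rb] = ra  # attach the smaller tree under the larger
        self.size[ra] += self.size[rb]
        return True

dsu = DSU(5)
dsu.union(0, 1)
dsu.union(1, 2)
print(dsu.find(2) == dsu.find(0))  # True: 0, 1, 2 now share a root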
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class UpperCamelCase ( lowercase_ ): lowercase = 'megatron-bert' def __init__( self ,__UpperCamelCase=2_9056 ,__UpperCamelCase=1024 ,__UpperCamelCase=24 ,__UpperCamelCase=16 ,__UpperCamelCase=4096 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=0 ,__UpperCamelCase="absolute" ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> List[str]: '''simple docstring''' super().__init__(pad_token_id=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = vocab_size lowercase_ : Optional[Any] = hidden_size lowercase_ : Tuple = num_hidden_layers lowercase_ : List[str] = num_attention_heads lowercase_ : Union[str, Any] = hidden_act lowercase_ : Tuple = intermediate_size lowercase_ : Any = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : List[Any] = max_position_embeddings lowercase_ : Optional[Any] = type_vocab_size lowercase_ : Any = initializer_range lowercase_ : str = layer_norm_eps lowercase_ : Optional[Any] = position_embedding_type lowercase_ : Tuple = use_cache
321
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
321
1
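The first snippet in that row is the MegatronBERT configuration (its `model_type` is 'megatron-bert'); in the released transformers API the corresponding classes are `MegatronBertConfig` and `MegatronBertModel`. A small sketch of how such a config drives model construction, with sizes shrunk so the toy model builds quickly (the values are otherwise arbitrary):

from transformers import MegatronBertConfig, MegatronBertModel

config = MegatronBertConfig(
    vocab_size=29056,       # the default from the __init__ above
    hidden_size=256,        # reduced from 1024 for a quick local build
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=512,
)
model = MegatronBertModel(config)
print(sum(p.numel() for p in model.parameters()))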
"""simple docstring""" import numpy as np # Importing the Keras libraries and packages import tensorflow as tf from tensorflow.keras import layers, models if __name__ == "__main__": # Initialising the CNN # (Sequential- Building the model layer by layer) __SCREAMING_SNAKE_CASE =models.Sequential() # Step 1 - Convolution # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel # (3,3) is the kernel size (filter matrix) classifier.add( layers.ConvaD(32, (3, 3), input_shape=(64, 64, 3), activation="relu") ) # Step 2 - Pooling classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Adding a second convolutional layer classifier.add(layers.ConvaD(32, (3, 3), activation="relu")) classifier.add(layers.MaxPoolingaD(pool_size=(2, 2))) # Step 3 - Flattening classifier.add(layers.Flatten()) # Step 4 - Full connection classifier.add(layers.Dense(units=128, activation="relu")) classifier.add(layers.Dense(units=1, activation="sigmoid")) # Compiling the CNN classifier.compile( optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"] ) # Part 2 - Fitting the CNN to the images # Load Trained model weights # from keras.models import load_model # regressor=load_model('cnn.h5') __SCREAMING_SNAKE_CASE =tf.keras.preprocessing.image.ImageDataGenerator( rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True ) __SCREAMING_SNAKE_CASE =tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255) __SCREAMING_SNAKE_CASE =train_datagen.flow_from_directory( "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) __SCREAMING_SNAKE_CASE =test_datagen.flow_from_directory( "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary" ) classifier.fit_generator( training_set, steps_per_epoch=5, epochs=30, validation_data=test_set ) classifier.save("cnn.h5") # Part 3 - Making new predictions __SCREAMING_SNAKE_CASE =tf.keras.preprocessing.image.load_img( "dataset/single_prediction/image.png", target_size=(64, 64) ) __SCREAMING_SNAKE_CASE =tf.keras.preprocessing.image.img_to_array(test_image) __SCREAMING_SNAKE_CASE =np.expand_dims(test_image, axis=0) __SCREAMING_SNAKE_CASE =classifier.predict(test_image) # training_set.class_indices if result[0][0] == 0: __SCREAMING_SNAKE_CASE ="Normal" if result[0][0] == 1: __SCREAMING_SNAKE_CASE ="Abnormality detected"
321
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
1
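One caveat about the CNN snippet in that row: `classifier.predict` on a sigmoid output head returns a probability strictly between 0 and 1, so comparing `result[0][0]` to exactly 0 or 1 will rarely select either branch. A thresholded check along these lines seems more robust (0.5 is the conventional cutoff; `result` below is a stand-in for the model output):

import numpy as np

result = np.array([[0.87]])  # stand-in for classifier.predict(test_image)
probability = float(result[0][0])
prediction = "Abnormality detected" if probability >= 0.5 else "Normal"
print(f"{prediction} (p={probability:.3f})")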
"""simple docstring""" import json import os import re import unittest from transformers import CodeGenTokenizer, CodeGenTokenizerFast from transformers.models.codegen.tokenization_codegen import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCamelCase ( lowercase_ , unittest.TestCase ): lowercase = CodeGenTokenizer lowercase = CodeGenTokenizerFast lowercase = True lowercase = {'add_prefix_space': True} lowercase = False def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt lowercase_ : List[str] = [ 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', '\u0120', '\u0120l', '\u0120n', '\u0120lo', '\u0120low', 'er', '\u0120lowest', '\u0120newer', '\u0120wider', '<unk>', '<|endoftext|>', ] lowercase_ : Union[str, Any] = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Any = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', ''] lowercase_ : Union[str, Any] = {'unk_token': '<unk>'} lowercase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> List[str]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' kwargs.update(self.special_tokens_map ) return CodeGenTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = 'lower newer' lowercase_ : Union[str, Any] = 'lower newer' return input_text, output_text def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Dict = CodeGenTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) lowercase_ : Union[str, Any] = 'lower newer' lowercase_ : Any = ['\u0120low', 'er', '\u0120', 'n', 'e', 'w', 'er'] lowercase_ : Tuple = tokenizer.tokenize(__UpperCamelCase ,add_prefix_space=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Tuple = tokens + [tokenizer.unk_token] lowercase_ : List[str] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' if not self.test_rust_tokenizer: return lowercase_ : Union[str, Any] = self.get_tokenizer() lowercase_ : Any = self.get_rust_tokenizer(add_prefix_space=__UpperCamelCase ) lowercase_ : Optional[int] = 'lower newer' # Testing tokenization lowercase_ : List[str] = tokenizer.tokenize(__UpperCamelCase ,add_prefix_space=__UpperCamelCase ) lowercase_ : Optional[Any] = rust_tokenizer.tokenize(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) # Testing conversion to ids without special tokens lowercase_ : Optional[int] = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ) lowercase_ : Union[str, Any] = 
rust_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) # Testing conversion to ids with special tokens lowercase_ : Any = self.get_rust_tokenizer(add_prefix_space=__UpperCamelCase ) lowercase_ : Any = tokenizer.encode(__UpperCamelCase ,add_prefix_space=__UpperCamelCase ) lowercase_ : Any = rust_tokenizer.encode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) # Testing the unknown token lowercase_ : Any = tokens + [rust_tokenizer.unk_token] lowercase_ : Optional[int] = [14, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> str: '''simple docstring''' pass def _UpperCAmelCase ( self ,__UpperCamelCase=15 ) -> List[Any]: '''simple docstring''' for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase_ : Optional[int] = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase ) # Simple input lowercase_ : Union[str, Any] = 'This is a simple input' lowercase_ : Optional[Any] = ['This is a simple input 1', 'This is a simple input 2'] lowercase_ : List[Any] = ('This is a simple input', 'This is a pair') lowercase_ : List[Any] = [ ('This is a simple input 1', 'This is a simple input 2'), ('This is a simple pair 1', 'This is a simple pair 2'), ] # Simple input tests self.assertRaises(__UpperCamelCase ,tokenizer_r.encode ,__UpperCamelCase ,max_length=__UpperCamelCase ,padding='max_length' ) # Simple input self.assertRaises(__UpperCamelCase ,tokenizer_r.encode_plus ,__UpperCamelCase ,max_length=__UpperCamelCase ,padding='max_length' ) # Simple input self.assertRaises( __UpperCamelCase ,tokenizer_r.batch_encode_plus ,__UpperCamelCase ,max_length=__UpperCamelCase ,padding='max_length' ,) # Pair input self.assertRaises(__UpperCamelCase ,tokenizer_r.encode ,__UpperCamelCase ,max_length=__UpperCamelCase ,padding='max_length' ) # Pair input self.assertRaises(__UpperCamelCase ,tokenizer_r.encode_plus ,__UpperCamelCase ,max_length=__UpperCamelCase ,padding='max_length' ) # Pair input self.assertRaises( __UpperCamelCase ,tokenizer_r.batch_encode_plus ,__UpperCamelCase ,max_length=__UpperCamelCase ,padding='max_length' ,) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,pad_token='<pad>' ) # Simple input lowercase_ : Union[str, Any] = 'This is a simple input' lowercase_ : List[Any] = ['This is a simple input looooooooong', 'This is a simple input'] lowercase_ : Dict = ('This is a simple input', 'This is a pair') lowercase_ : Tuple = [ ('This is a simple input loooooong', 'This is a simple input'), ('This is a simple pair loooooong', 'This is a simple pair'), ] lowercase_ : Any = tokenizer.pad_token_id lowercase_ : Any = tokenizer(__UpperCamelCase ,padding='max_length' ,max_length=30 ,return_tensors='np' ) lowercase_ : int = tokenizer(__UpperCamelCase ,padding=__UpperCamelCase ,truncate=__UpperCamelCase ,return_tensors='np' ) lowercase_ : int = tokenizer(*__UpperCamelCase ,padding='max_length' ,max_length=60 ,return_tensors='np' ) lowercase_ : Tuple = tokenizer(__UpperCamelCase ,padding=__UpperCamelCase ,truncate=__UpperCamelCase ,return_tensors='np' ) # s # test single string max_length padding self.assertEqual(out_s['input_ids'].shape[-1] ,30 ) 
self.assertTrue(pad_token_id in out_s['input_ids'] ) self.assertTrue(0 in out_s['attention_mask'] ) # s2 # test automatic padding self.assertEqual(out_sa['input_ids'].shape[-1] ,33 ) # long slice doesn't have padding self.assertFalse(pad_token_id in out_sa['input_ids'][0] ) self.assertFalse(0 in out_sa['attention_mask'][0] ) # short slice does have padding self.assertTrue(pad_token_id in out_sa['input_ids'][1] ) self.assertTrue(0 in out_sa['attention_mask'][1] ) # p # test single pair max_length padding self.assertEqual(out_p['input_ids'].shape[-1] ,60 ) self.assertTrue(pad_token_id in out_p['input_ids'] ) self.assertTrue(0 in out_p['attention_mask'] ) # p2 # test automatic padding pair self.assertEqual(out_pa['input_ids'].shape[-1] ,52 ) # long slice pair doesn't have padding self.assertFalse(pad_token_id in out_pa['input_ids'][0] ) self.assertFalse(0 in out_pa['attention_mask'][0] ) # short slice pair does have padding self.assertTrue(pad_token_id in out_pa['input_ids'][1] ) self.assertTrue(0 in out_pa['attention_mask'][1] ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Any = '$$$' lowercase_ : List[Any] = CodeGenTokenizer.from_pretrained(self.tmpdirname ,bos_token=__UpperCamelCase ,add_bos_token=__UpperCamelCase ) lowercase_ : List[Any] = 'This is a simple input' lowercase_ : Optional[int] = ['This is a simple input 1', 'This is a simple input 2'] lowercase_ : str = tokenizer.bos_token_id lowercase_ : List[str] = tokenizer(__UpperCamelCase ) lowercase_ : str = tokenizer(__UpperCamelCase ) self.assertEqual(out_s.input_ids[0] ,__UpperCamelCase ) self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) ) lowercase_ : int = tokenizer.decode(out_s.input_ids ) lowercase_ : Any = tokenizer.batch_decode(out_sa.input_ids ) self.assertEqual(decode_s.split()[0] ,__UpperCamelCase ) self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) ) @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Any = CodeGenTokenizer.from_pretrained('Salesforce/codegen-350M-mono' ) lowercase_ : List[str] = '\nif len_a > len_b:\n result = a\nelse:\n result = b\n\n\n\n#' lowercase_ : List[str] = '\nif len_a > len_b: result = a\nelse: result = b' lowercase_ : List[str] = tokenizer.encode(__UpperCamelCase ) lowercase_ : List[Any] = ['^#', re.escape('<|endoftext|>' ), '^\'\'\'', '^"""', '\n\n\n'] lowercase_ : Tuple = tokenizer.decode(__UpperCamelCase ,truncate_before_pattern=__UpperCamelCase ) self.assertEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' pass
321
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
1
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class UpperCamelCase ( lowercase_ ): lowercase = 42 lowercase = 42 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
321
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring'''
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less than current value"
        lowercase_ : Any = new_value
        lowercase_ : List[str] = new_value
        self.sift_up(self.idx_of_element[node] )


__SCREAMING_SNAKE_CASE =Node("R", -1)
__SCREAMING_SNAKE_CASE =Node("B", 6)
__SCREAMING_SNAKE_CASE =Node("A", 3)
__SCREAMING_SNAKE_CASE =Node("X", 1)
__SCREAMING_SNAKE_CASE =Node("E", 4)

# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
__SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print("Min Heap - before decrease key")
for i in my_min_heap.heap:
    print(i)

print("Min Heap - After decrease key of node [B -> -17]")
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
321
1
"""simple docstring""" from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class UpperCamelCase ( lowercase_ ): lowercase = 'perceiver' def __init__( self ,__UpperCamelCase=256 ,__UpperCamelCase=1280 ,__UpperCamelCase=768 ,__UpperCamelCase=1 ,__UpperCamelCase=26 ,__UpperCamelCase=8 ,__UpperCamelCase=8 ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="kv" ,__UpperCamelCase=1 ,__UpperCamelCase=1 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.02 ,__UpperCamelCase=1e-12 ,__UpperCamelCase=True ,__UpperCamelCase=262 ,__UpperCamelCase=2048 ,__UpperCamelCase=56 ,__UpperCamelCase=[368, 496] ,__UpperCamelCase=16 ,__UpperCamelCase=1920 ,__UpperCamelCase=16 ,__UpperCamelCase=[1, 16, 224, 224] ,**__UpperCamelCase ,) -> List[Any]: '''simple docstring''' super().__init__(**__UpperCamelCase ) lowercase_ : Dict = num_latents lowercase_ : List[str] = d_latents lowercase_ : Tuple = d_model lowercase_ : Optional[Any] = num_blocks lowercase_ : Dict = num_self_attends_per_block lowercase_ : Tuple = num_self_attention_heads lowercase_ : Optional[Any] = num_cross_attention_heads lowercase_ : Tuple = qk_channels lowercase_ : Tuple = v_channels lowercase_ : Any = cross_attention_shape_for_attention lowercase_ : Any = self_attention_widening_factor lowercase_ : str = cross_attention_widening_factor lowercase_ : Optional[Any] = hidden_act lowercase_ : Dict = attention_probs_dropout_prob lowercase_ : Any = initializer_range lowercase_ : Dict = layer_norm_eps lowercase_ : int = use_query_residual # masked language modeling attributes lowercase_ : Tuple = vocab_size lowercase_ : Union[str, Any] = max_position_embeddings # image classification attributes lowercase_ : Union[str, Any] = image_size # flow attributes lowercase_ : Dict = train_size # multimodal autoencoding attributes lowercase_ : Optional[int] = num_frames lowercase_ : List[Any] = audio_samples_per_frame lowercase_ : str = samples_per_patch lowercase_ : Dict = output_shape class UpperCamelCase ( lowercase_ ): @property def _UpperCAmelCase ( self ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": lowercase_ : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: lowercase_ : Optional[Any] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('inputs', dynamic_axis), ('attention_mask', dynamic_axis), ] ) @property def _UpperCAmelCase ( self ) -> float: '''simple docstring''' return 1e-4 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = -1 ,__UpperCamelCase = -1 ,__UpperCamelCase = -1 ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = 3 ,__UpperCamelCase = 40 ,__UpperCamelCase = 40 ,) -> Mapping[str, Any]: '''simple docstring''' if isinstance(__UpperCamelCase ,__UpperCamelCase ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase_ : List[Any] = compute_effective_axis_dimension( 
__UpperCamelCase ,fixed_dimension=OnnxConfig.default_fixed_batch ,num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX lowercase_ : Optional[Any] = preprocessor.num_special_tokens_to_add(__UpperCamelCase ) lowercase_ : int = compute_effective_axis_dimension( __UpperCamelCase ,fixed_dimension=OnnxConfig.default_fixed_sequence ,num_token_to_add=__UpperCamelCase ) # Generate dummy inputs according to compute batch and sequence lowercase_ : Dict = [' '.join(['a'] ) * seq_length] * batch_size lowercase_ : int = dict(preprocessor(__UpperCamelCase ,return_tensors=__UpperCamelCase ) ) lowercase_ : Union[str, Any] = inputs.pop('input_ids' ) return inputs elif isinstance(__UpperCamelCase ,__UpperCamelCase ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX lowercase_ : Union[str, Any] = compute_effective_axis_dimension(__UpperCamelCase ,fixed_dimension=OnnxConfig.default_fixed_batch ) lowercase_ : Dict = self._generate_dummy_images(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Union[str, Any] = dict(preprocessor(images=__UpperCamelCase ,return_tensors=__UpperCamelCase ) ) lowercase_ : List[Any] = inputs.pop('pixel_values' ) return inputs else: raise ValueError( 'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
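Since the Perceiver config above assigns each constructor argument to a same-named attribute, the keyword interface can be read off `__init__` directly. A small sketch of building a reduced config that way (the keyword names are inferred from those attribute assignments; the values are illustrative, not the checkpoint defaults):

```python
from transformers import PerceiverConfig

# Keyword names inferred from the attribute assignments in __init__ above;
# the sizes are deliberately tiny, for quick experiments only.
config = PerceiverConfig(
    num_latents=64,
    d_latents=256,
    num_blocks=1,
    num_self_attends_per_block=4,
    num_self_attention_heads=4,
)
print(config.num_latents, config.d_latents)
```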
321
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
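The visual-prompt test above pins down a CLIPSeg-specific processor path: passing `visual_prompt` images produces `conditional_pixel_values` next to `pixel_values`. A sketch of the same call outside the test harness; the checkpoint name is an assumption (any published CLIPSeg processor should behave the same):

```python
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

# Assumed public checkpoint; the test builds its processor from local fixtures instead.
processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")

image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
prompt = Image.fromarray(np.full((30, 40, 3), 255, dtype=np.uint8))

inputs = processor(images=image, visual_prompt=prompt, return_tensors="np")
print(sorted(inputs.keys()))  # per the test: conditional_pixel_values, pixel_values
```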
321
1
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int = 2_00_00_00 ): lowercase_ : List[Any] = [0 for i in range(n + 1 )] lowercase_ : Any = 1 lowercase_ : List[Any] = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , __SCREAMING_SNAKE_CASE ): lowercase_ : int = 1 lowercase_ : Optional[Any] = 0 for i in range(__SCREAMING_SNAKE_CASE ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(F"{solution() = }")
321
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
1
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
"""simple docstring""" import unittest from transformers import BertGenerationConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import BertGenerationDecoder, BertGenerationEncoder class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=99 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=50 ,__UpperCamelCase=0.02 ,__UpperCamelCase=True ,__UpperCamelCase=None ,) -> List[str]: '''simple docstring''' lowercase_ : Dict = parent lowercase_ : Tuple = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Optional[Any] = is_training lowercase_ : Any = use_input_mask lowercase_ : Optional[Any] = vocab_size lowercase_ : str = hidden_size lowercase_ : Any = num_hidden_layers lowercase_ : Dict = num_attention_heads lowercase_ : Optional[int] = intermediate_size lowercase_ : Any = hidden_act lowercase_ : Optional[Any] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : Any = max_position_embeddings lowercase_ : Optional[Any] = initializer_range lowercase_ : Union[str, Any] = use_labels lowercase_ : Union[str, Any] = scope def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : List[str] = None if self.use_input_mask: lowercase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) if self.use_labels: lowercase_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Any = self.get_config() return config, input_ids, input_mask, token_labels def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return BertGenerationConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = self.prepare_config_and_inputs() lowercase_ : int = True lowercase_ : Union[str, Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 ) return ( config, input_ids, input_mask, token_labels, encoder_hidden_states, encoder_attention_mask, ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Any: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) 
lowercase_ : Optional[Any] = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[Any] = True lowercase_ : str = BertGenerationEncoder(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Union[str, Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,) lowercase_ : Dict = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ,) -> int: '''simple docstring''' lowercase_ : List[str] = True lowercase_ : Union[str, Any] = True lowercase_ : int = BertGenerationDecoder(config=__UpperCamelCase ).to(__UpperCamelCase ).eval() # first forward pass lowercase_ : str = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,use_cache=__UpperCamelCase ,) lowercase_ : Dict = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids lowercase_ : Union[str, Any] = ids_tensor((self.batch_size, 3) ,config.vocab_size ) lowercase_ : Dict = ids_tensor((self.batch_size, 3) ,vocab_size=2 ) # append to next input_ids and lowercase_ : Tuple = torch.cat([input_ids, next_tokens] ,dim=-1 ) lowercase_ : Any = torch.cat([input_mask, next_mask] ,dim=-1 ) lowercase_ : int = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] lowercase_ : List[Any] = model( __UpperCamelCase ,attention_mask=__UpperCamelCase ,encoder_hidden_states=__UpperCamelCase ,encoder_attention_mask=__UpperCamelCase ,past_key_values=__UpperCamelCase ,output_hidden_states=__UpperCamelCase ,)['hidden_states'][0] # select random slice lowercase_ : int = ids_tensor((1,) ,output_from_past.shape[-1] ).item() lowercase_ : List[Any] = output_from_no_past[:, -3:, random_slice_idx].detach() lowercase_ : int = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] ) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(__UpperCamelCase ,__UpperCamelCase ,atol=1e-3 ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,*__UpperCamelCase ,) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = BertGenerationDecoder(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Dict = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = self.prepare_config_and_inputs() lowercase_ : Optional[int] = {'input_ids': input_ids, 'attention_mask': 
input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else () lowercase = (BertGenerationDecoder,) if is_torch_available() else () lowercase = ( {'feature-extraction': BertGenerationEncoder, 'text-generation': BertGenerationDecoder} if is_torch_available() else {} ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = BertGenerationEncoderTester(self ) lowercase_ : Tuple = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs() lowercase_ : Optional[int] = 'bert' self.model_tester.create_and_check_model(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Tuple = self.model_tester.prepare_config_and_inputs_for_decoder() lowercase_ : Optional[Any] = None self.model_tester.create_and_check_model_as_decoder( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_for_causal_lm(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : int = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) self.assertIsNotNone(__UpperCamelCase ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : List[Any] = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Tuple = model(__UpperCamelCase )[0] lowercase_ : Dict = torch.Size([1, 8, 1024] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : str = torch.tensor( [[[0.1775, 0.0083, -0.0321], [1.6002, 0.1287, 0.3912], [2.1473, 0.5791, 0.6066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @require_torch class UpperCamelCase ( unittest.TestCase ): @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : str = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' ) lowercase_ : Dict 
= torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] ) with torch.no_grad(): lowercase_ : Dict = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = torch.Size([1, 8, 5_0358] ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : Dict = torch.tensor( [[[-0.5788, -2.5994, -3.7054], [0.0438, 4.7997, 1.8795], [1.5862, 6.6409, 4.4638]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
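The large-inputs decoder test above exercises the standard incremental-decoding contract: a forward pass with `use_cache=True` returns `past_key_values`, and a follow-up pass over only the new tokens plus that cache must match a full-sequence pass. A minimal sketch of the same check against a tiny randomly initialized decoder (config sizes are illustrative):

```python
import torch
from transformers import BertGenerationConfig, BertGenerationDecoder

config = BertGenerationConfig(
    vocab_size=50, hidden_size=32, num_hidden_layers=2,
    num_attention_heads=4, intermediate_size=64, is_decoder=True,
)
model = BertGenerationDecoder(config).eval()

input_ids = torch.randint(0, 50, (1, 8))
next_token = torch.randint(0, 50, (1, 1))
with torch.no_grad():
    cache = model(input_ids, use_cache=True).past_key_values
    # Full pass over the extended sequence...
    full = model(torch.cat([input_ids, next_token], dim=-1)).logits[:, -1]
    # ...must agree with the cached pass over just the new token.
    cached = model(next_token, past_key_values=cache).logits[:, -1]
print(torch.allclose(full, cached, atol=1e-4))  # expected: True
```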
321
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import convert_to_rgb, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_vision_available(): import PIL __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['pixel_values'] def __init__( self ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = PILImageResampling.BICUBIC ,__UpperCamelCase = True ,__UpperCamelCase = 1 / 255 ,__UpperCamelCase = True ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = True ,**__UpperCamelCase ,) -> None: '''simple docstring''' super().__init__(**__UpperCamelCase ) lowercase_ : str = size if size is not None else {'height': 384, 'width': 384} lowercase_ : int = get_size_dict(__UpperCamelCase ,default_to_square=__UpperCamelCase ) lowercase_ : int = do_resize lowercase_ : Tuple = size lowercase_ : Any = resample lowercase_ : Optional[Any] = do_rescale lowercase_ : Any = rescale_factor lowercase_ : List[Any] = do_normalize lowercase_ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase_ : Optional[int] = image_std if image_std is not None else OPENAI_CLIP_STD lowercase_ : Any = do_convert_rgb def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = PILImageResampling.BICUBIC ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray: '''simple docstring''' lowercase_ : Tuple = get_size_dict(__UpperCamelCase ,default_to_square=__UpperCamelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''The `size` dictionary must contain the keys `height` and `width`. 
Got {size.keys()}''' ) lowercase_ : str = (size['height'], size['width']) return resize(__UpperCamelCase ,size=__UpperCamelCase ,resample=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Dict: '''simple docstring''' return rescale(__UpperCamelCase ,scale=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> np.ndarray: '''simple docstring''' return normalize(__UpperCamelCase ,mean=__UpperCamelCase ,std=__UpperCamelCase ,data_format=__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = ChannelDimension.FIRST ,**__UpperCamelCase ,) -> PIL.Image.Image: '''simple docstring''' lowercase_ : Optional[int] = do_resize if do_resize is not None else self.do_resize lowercase_ : List[Any] = resample if resample is not None else self.resample lowercase_ : Tuple = do_rescale if do_rescale is not None else self.do_rescale lowercase_ : str = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase_ : Dict = do_normalize if do_normalize is not None else self.do_normalize lowercase_ : Dict = image_mean if image_mean is not None else self.image_mean lowercase_ : List[Any] = image_std if image_std is not None else self.image_std lowercase_ : Any = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase_ : str = size if size is not None else self.size lowercase_ : Tuple = get_size_dict(__UpperCamelCase ,default_to_square=__UpperCamelCase ) lowercase_ : Tuple = make_list_of_images(__UpperCamelCase ) if not valid_images(__UpperCamelCase ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_resize and size is None or resample is None: raise ValueError('Size and resample must be specified if do_resize is True.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('Image mean and std must be specified if do_normalize is True.' ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase_ : List[str] = [convert_to_rgb(__UpperCamelCase ) for image in images] # All transformations expect numpy arrays. lowercase_ : Optional[int] = [to_numpy_array(__UpperCamelCase ) for image in images] if do_resize: lowercase_ : Any = [self.resize(image=__UpperCamelCase ,size=__UpperCamelCase ,resample=__UpperCamelCase ) for image in images] if do_rescale: lowercase_ : Dict = [self.rescale(image=__UpperCamelCase ,scale=__UpperCamelCase ) for image in images] if do_normalize: lowercase_ : Any = [self.normalize(image=__UpperCamelCase ,mean=__UpperCamelCase ,std=__UpperCamelCase ) for image in images] lowercase_ : Union[str, Any] = [to_channel_dimension_format(__UpperCamelCase ,__UpperCamelCase ) for image in images] lowercase_ : Union[str, Any] = BatchFeature(data={'pixel_values': images} ,tensor_type=__UpperCamelCase ) return encoded_outputs
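One real bug worth flagging in the validation block above: `if do_resize and size is None or resample is None:` parses as `(do_resize and size is None) or resample is None`, so a `None` resample raises even when resizing is disabled. Judging from the error message, the intended guard is presumably:

```python
# Intended precedence for the do_resize guard (a sketch, not the shipped code):
if do_resize and (size is None or resample is None):
    raise ValueError("Size and resample must be specified if do_resize is True.")
```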
321
"""simple docstring""" import unittest from pathlib import Path from tempfile import NamedTemporaryFile, TemporaryDirectory from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline from transformers.convert_graph_to_onnx import ( convert, ensure_valid_input, generate_identified_filename, infer_shapes, quantize, ) from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' return None class UpperCamelCase : def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' return None class UpperCamelCase ( unittest.TestCase ): lowercase = [ # (model_name, model_kwargs) ('bert-base-cased', {}), ('gpt2', {'use_cache': False}), # We don't support exporting GPT2 past keys anymore ] @require_tf @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) @require_torch @slow def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' from transformers import BertModel lowercase_ : Union[str, Any] = ['[UNK]', '[SEP]', '[CLS]', '[PAD]', '[MASK]', 'some', 'other', 'words'] with NamedTemporaryFile(mode='w+t' ) as vocab_file: vocab_file.write('\n'.join(__UpperCamelCase ) ) vocab_file.flush() lowercase_ : List[str] = BertTokenizerFast(vocab_file.name ) with TemporaryDirectory() as bert_save_dir: lowercase_ : Optional[Any] = BertModel(BertConfig(vocab_size=len(__UpperCamelCase ) ) ) model.save_pretrained(__UpperCamelCase ) self._test_export(__UpperCamelCase ,'pt' ,12 ,__UpperCamelCase ) @require_tf @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Optional[int] = self._test_export(__UpperCamelCase ,'tf' ,12 ,**__UpperCamelCase ) lowercase_ : int = quantize(Path(__UpperCamelCase ) ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) @require_torch @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST: lowercase_ : Tuple = self._test_export(__UpperCamelCase ,'pt' ,12 ,**__UpperCamelCase ) lowercase_ : Tuple = quantize(__UpperCamelCase ) # Ensure the actual quantized model is not bigger than the original one if quantized_path.stat().st_size >= Path(__UpperCamelCase ).stat().st_size: self.fail('Quantized model is bigger than initial ONNX model' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=None ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' try: # Compute path with TemporaryDirectory() as tempdir: lowercase_ : Dict = Path(__UpperCamelCase ).joinpath('model.onnx' ) # Remove folder if exists if path.parent.exists(): path.parent.rmdir() # Export convert(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) return path except Exception as e: 
self.fail(__UpperCamelCase ) @require_torch @require_tokenizers @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' from transformers import BertModel lowercase_ : List[Any] = BertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Union[str, Any] = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'pt' ) @require_tf @require_tokenizers @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' from transformers import TFBertModel lowercase_ : Optional[Any] = TFBertModel(BertConfig.from_pretrained('lysandre/tiny-bert-random' ) ) lowercase_ : Any = BertTokenizerFast.from_pretrained('lysandre/tiny-bert-random' ) self._test_infer_dynamic_axis(__UpperCamelCase ,__UpperCamelCase ,'tf' ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Tuple = FeatureExtractionPipeline(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = ['input_ids', 'token_type_ids', 'attention_mask', 'output_0', 'output_1'] lowercase_ , lowercase_ , lowercase_ , lowercase_ : Any = infer_shapes(__UpperCamelCase ,__UpperCamelCase ) # Assert all variables are present self.assertEqual(len(__UpperCamelCase ) ,len(__UpperCamelCase ) ) self.assertTrue(all(var_name in shapes for var_name in variable_names ) ) self.assertSequenceEqual(variable_names[:3] ,__UpperCamelCase ) self.assertSequenceEqual(variable_names[3:] ,__UpperCamelCase ) # Assert inputs are {0: batch, 1: sequence} for var_name in ["input_ids", "token_type_ids", "attention_mask"]: self.assertDictEqual(shapes[var_name] ,{0: 'batch', 1: 'sequence'} ) # Assert outputs are {0: batch, 1: sequence} and {0: batch} self.assertDictEqual(shapes['output_0'] ,{0: 'batch', 1: 'sequence'} ) self.assertDictEqual(shapes['output_1'] ,{0: 'batch'} ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = ['input_ids', 'attention_mask', 'token_type_ids'] lowercase_ : List[Any] = {'input_ids': [1, 2, 3, 4], 'attention_mask': [0, 0, 0, 0], 'token_type_ids': [1, 1, 1, 1]} lowercase_ , lowercase_ : int = ensure_valid_input(FuncContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the same number of args (all are valid) self.assertEqual(len(__UpperCamelCase ) ,3 ) # Should have exactly the same input names self.assertEqual(set(__UpperCamelCase ) ,set(__UpperCamelCase ) ) # Parameter should be reordered according to their respective place in the function: # (input_ids, token_type_ids, attention_mask) self.assertEqual(__UpperCamelCase ,(tokens['input_ids'], tokens['token_type_ids'], tokens['attention_mask']) ) # Generated args are interleaved with another args (for instance parameter "past" in GPT2) lowercase_ , lowercase_ : Optional[int] = ensure_valid_input(FuncNonContiguousArgs() ,__UpperCamelCase ,__UpperCamelCase ) # Should have exactly the one arg (all before the one not provided "some_other_args") self.assertEqual(len(__UpperCamelCase ) ,1 ) self.assertEqual(len(__UpperCamelCase ) ,1 ) # Should have only "input_ids" self.assertEqual(inputs_args[0] ,tokens['input_ids'] ) self.assertEqual(ordered_input_names[0] ,'input_ids' ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = generate_identified_filename(Path('/home/something/my_fake_model.onnx' ) ,'-test' ) self.assertEqual('/home/something/my_fake_model-test.onnx' ,generated.as_posix() )
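The quantization tests above lean on two helpers from `transformers.convert_graph_to_onnx`: `convert(...)` to export a checkpoint and `quantize(Path(...))`, which writes a quantized copy next to the original and returns its path. A sketch following the same positional order `_test_export` uses (framework, model, output path, opset); treat the exact call surface as an assumption:

```python
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers.convert_graph_to_onnx import convert, quantize

with TemporaryDirectory() as tmpdir:
    onnx_path = Path(tmpdir) / "model.onnx"
    # Positional order mirrors the test: framework, model name, output path, opset.
    convert("pt", "bert-base-cased", onnx_path, 12)
    quantized_path = quantize(onnx_path)
    # The tests' invariant: quantization should shrink the file.
    assert quantized_path.stat().st_size < onnx_path.stat().st_size
```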
321
1
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ..models.auto import AutoModelForVisionaSeq from ..utils import requires_backends from .base import PipelineTool if TYPE_CHECKING: from PIL import Image class UpperCamelCase ( lowercase_ ): lowercase = 'Salesforce/blip-image-captioning-base' lowercase = ( 'This is a tool that generates a description of an image. It takes an input named `image` which should be the ' 'image to caption, and returns a text that contains the description in English.' ) lowercase = 'image_captioner' lowercase = AutoModelForVisionaSeq lowercase = ['image'] lowercase = ['text'] def __init__( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' requires_backends(self ,['vision'] ) super().__init__(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return self.pre_processor(images=__UpperCamelCase ,return_tensors='pt' ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.model.generate(**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return self.pre_processor.batch_decode(__UpperCamelCase ,skip_special_tokens=__UpperCamelCase )[0].strip()
321
"""simple docstring""" import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch if is_torch_available(): import torch from transformers.generation import DisjunctiveConstraint @require_torch class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Union[str, Any] = [[1, 2, 4], [1, 2, 3, 4]] lowercase_ : List[Any] = DisjunctiveConstraint(__UpperCamelCase ) self.assertTrue(isinstance(dc.token_ids ,__UpperCamelCase ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) ) with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[Any] = [[1, 2], [1, 2, 3, 4]] with self.assertRaises(__UpperCamelCase ): DisjunctiveConstraint(__UpperCamelCase ) # fails here def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = [[1, 2, 3], [1, 2, 4]] lowercase_ : Dict = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Union[str, Any] = dc.update(1 ) lowercase_ : str = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = dc.update(2 ) lowercase_ : Any = stepped is True and completed is False and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Tuple = dc.update(3 ) lowercase_ : Union[str, Any] = stepped is True and completed is True and reset is False self.assertTrue(__UpperCamelCase ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 3] ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]] lowercase_ : Union[str, Any] = DisjunctiveConstraint(__UpperCamelCase ) lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : str = dc.update(4 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.current_seq == [1, 2, 4] ) lowercase_ , lowercase_ , lowercase_ : List[str] = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.current_seq == [1, 2, 4, 5] ) dc.reset() lowercase_ , lowercase_ , lowercase_ : Optional[int] = dc.update(1 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 3 ) self.assertTrue(dc.current_seq == [1] ) lowercase_ , lowercase_ , lowercase_ : int = dc.update(2 ) self.assertTrue(not dc.completed ) self.assertTrue(dc.remaining() == 2 ) self.assertTrue(dc.current_seq == [1, 2] ) lowercase_ , lowercase_ , lowercase_ : Dict = dc.update(5 ) self.assertTrue(dc.completed ) # Completed! self.assertTrue(dc.remaining() == 0 ) self.assertTrue(dc.current_seq == [1, 2, 5] )
321
1
"""simple docstring""" import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=32 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=[10, 20, 30, 40] ,__UpperCamelCase=[2, 2, 3, 2] ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=10 ,__UpperCamelCase=0.02 ,__UpperCamelCase=["stage2", "stage3", "stage4"] ,__UpperCamelCase=3 ,__UpperCamelCase=None ,) -> Any: '''simple docstring''' lowercase_ : List[Any] = parent lowercase_ : List[str] = batch_size lowercase_ : int = image_size lowercase_ : List[str] = num_channels lowercase_ : int = num_stages lowercase_ : str = hidden_sizes lowercase_ : List[Any] = depths lowercase_ : List[str] = is_training lowercase_ : Dict = use_labels lowercase_ : Optional[Any] = intermediate_size lowercase_ : int = hidden_act lowercase_ : Any = type_sequence_label_size lowercase_ : List[str] = initializer_range lowercase_ : List[str] = out_features lowercase_ : Union[str, Any] = num_labels lowercase_ : Optional[Any] = scope lowercase_ : Optional[int] = num_stages def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Dict = None if self.use_labels: lowercase_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels ,num_stages=self.num_stages ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,is_training=self.is_training ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,out_features=self.out_features ,) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config() ,hidden_size=512 ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=__UpperCamelCase ,auxiliary_loss_weight=0.4 ,auxiliary_in_channels=40 ,auxiliary_channels=256 ,auxiliary_num_convs=1 ,auxiliary_concat_input=__UpperCamelCase ,loss_ignore_index=255 ,num_labels=self.num_labels ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' lowercase_ : List[str] = UperNetForSemanticSegmentation(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[str] = model(__UpperCamelCase ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = 
self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : str = config_and_inputs lowercase_ : str = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (UperNetForSemanticSegmentation,) if is_torch_available() else () lowercase = {'image-segmentation': UperNetForSemanticSegmentation} if is_torch_available() else {} lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Tuple = UperNetModelTester(self ) lowercase_ : Dict = ConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ , lowercase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Any = model_class(__UpperCamelCase ) lowercase_ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : str = [*signature.parameters.keys()] lowercase_ : Dict = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase ) @unittest.skip(reason='UperNet does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='UperNet does not support input and output embeddings' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip(reason='UperNet does not have a base model' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip(reason='UperNet does not have a base model' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason='UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' def check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : Any = model_class(__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() with torch.no_grad(): lowercase_ : int = model(**self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase_ : Tuple = self.model_tester.num_stages self.assertEqual(len(__UpperCamelCase ) ,expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : Tuple = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase_ : Optional[int] = True check_hidden_states_output(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : str = _config_zero_init(__UpperCamelCase ) lowercase_ : Optional[int] = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowercase_ : Dict = model_class(config=__UpperCamelCase ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' ,) @unittest.skip(reason='UperNet does not have tied weights' ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' pass @slow def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : Union[str, Any] = UperNetForSemanticSegmentation.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def lowercase__( ): lowercase_ : List[Any] = hf_hub_download( repo_id='hf-internal-testing/fixtures_ade20k' , repo_type='dataset' , filename='ADE_val_00000001.jpg' ) lowercase_ : str = Image.open(__SCREAMING_SNAKE_CASE ).convert('RGB' ) return image @require_torch @require_vision @slow class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = AutoImageProcessor.from_pretrained('openmmlab/upernet-swin-tiny' ) lowercase_ : Optional[int] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-swin-tiny' ).to(__UpperCamelCase ) lowercase_ : int = prepare_img() lowercase_ : List[Any] = processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) with torch.no_grad(): lowercase_ : int = model(**__UpperCamelCase ) lowercase_ : Tuple = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : Optional[Any] = torch.tensor( [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) def 
_UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = AutoImageProcessor.from_pretrained('openmmlab/upernet-convnext-tiny' ) lowercase_ : Optional[int] = UperNetForSemanticSegmentation.from_pretrained('openmmlab/upernet-convnext-tiny' ).to(__UpperCamelCase ) lowercase_ : Union[str, Any] = prepare_img() lowercase_ : List[Any] = processor(images=__UpperCamelCase ,return_tensors='pt' ).to(__UpperCamelCase ) with torch.no_grad(): lowercase_ : Any = model(**__UpperCamelCase ) lowercase_ : Optional[Any] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : Any = torch.tensor( [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] ).to(__UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
"""simple docstring""" import argparse import tensorflow as tf import torch from transformers import BertConfig, BertForMaskedLM from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertPooler, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging logging.set_verbosity_info() def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str ): def get_masked_lm_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : int = F'''masked_lm/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : str = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[Any] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_array(__SCREAMING_SNAKE_CASE : str ): lowercase_ : Tuple = F'''encoder/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : Tuple = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : List[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) def get_encoder_attention_layer_array(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] ): lowercase_ : List[Any] = F'''encoder/_transformer_layers/{layer_index}/_attention_layer/{name}/.ATTRIBUTES/VARIABLE_VALUE''' lowercase_ : Optional[Any] = tf.train.load_variable(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = array.reshape(__SCREAMING_SNAKE_CASE ) if "kernel" in name: lowercase_ : List[str] = array.transpose() return torch.from_numpy(__SCREAMING_SNAKE_CASE ) print(F'''Loading model based on config from {config_path}...''' ) lowercase_ : Any = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[Any] = BertForMaskedLM(__SCREAMING_SNAKE_CASE ) # Layers for layer_index in range(0 , config.num_hidden_layers ): lowercase_ : BertLayer = model.bert.encoder.layer[layer_index] # Self-attention lowercase_ : BertSelfAttention = layer.attention.self lowercase_ : str = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/kernel' , self_attn.query.weight.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_query_dense/bias' , self_attn.query.bias.data.shape ) lowercase_ : Tuple = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/kernel' , self_attn.key.weight.data.shape ) lowercase_ : int = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_key_dense/bias' , self_attn.key.bias.data.shape ) lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/kernel' , self_attn.value.weight.data.shape ) lowercase_ : List[Any] = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_value_dense/bias' , self_attn.value.bias.data.shape ) # Self-attention Output lowercase_ : BertSelfOutput = layer.attention.output lowercase_ : Dict = get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/kernel' , self_output.dense.weight.data.shape ) lowercase_ : Any = 
get_encoder_attention_layer_array( __SCREAMING_SNAKE_CASE , '_output_dense/bias' , self_output.dense.bias.data.shape ) lowercase_ : Tuple = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/gamma' ) lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_attention_layer_norm/beta' ) # Intermediate lowercase_ : BertIntermediate = layer.intermediate lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/kernel' ) lowercase_ : Optional[int] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_intermediate_dense/bias' ) # Output lowercase_ : BertOutput = layer.output lowercase_ : Any = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/kernel' ) lowercase_ : Optional[Any] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_dense/bias' ) lowercase_ : List[str] = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/gamma' ) lowercase_ : int = get_encoder_layer_array(__SCREAMING_SNAKE_CASE , '_output_layer_norm/beta' ) # Embeddings lowercase_ : Optional[Any] = get_encoder_array('_position_embedding_layer/embeddings' ) lowercase_ : int = get_encoder_array('_type_embedding_layer/embeddings' ) lowercase_ : Any = get_encoder_array('_embedding_norm_layer/gamma' ) lowercase_ : Optional[Any] = get_encoder_array('_embedding_norm_layer/beta' ) # LM Head lowercase_ : int = model.cls.predictions.transform lowercase_ : str = get_masked_lm_array('dense/kernel' ) lowercase_ : Optional[Any] = get_masked_lm_array('dense/bias' ) lowercase_ : Optional[Any] = get_masked_lm_array('layer_norm/gamma' ) lowercase_ : Optional[int] = get_masked_lm_array('layer_norm/beta' ) lowercase_ : List[str] = get_masked_lm_array('embedding_table' ) # Pooling lowercase_ : Optional[Any] = BertPooler(config=__SCREAMING_SNAKE_CASE ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/kernel' ) lowercase_ : BertPooler = get_encoder_array('_pooler_layer/bias' ) # Export final model model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Integration test - should load without any errors ;) lowercase_ : Tuple = BertForMaskedLM.from_pretrained(__SCREAMING_SNAKE_CASE ) print(new_model.eval() ) print('Model conversion was done successfully!' ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow Token Dropping checkpoint path." ) parser.add_argument( "--bert_config_file", type=str, required=True, help="The config json file corresponding to the BERT model. This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", type=str, required=True, help="Path to the output PyTorch model.", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
321
1
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : int = generate_pascal_triangle(__SCREAMING_SNAKE_CASE ) for row_idx in range(__SCREAMING_SNAKE_CASE ): # Print left spaces for _ in range(num_rows - row_idx - 1 ): print(end=' ' ) # Print row values for col_idx in range(row_idx + 1 ): if col_idx != row_idx: print(triangle[row_idx][col_idx] , end=' ' ) else: print(triangle[row_idx][col_idx] , end='' ) print() def lowercase__( __SCREAMING_SNAKE_CASE : int ): if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError('The input value of \'num_rows\' should be \'int\'' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( 'The input value of \'num_rows\' should be greater than or equal to 0' ) lowercase_ : list[list[int]] = [] for current_row_idx in range(__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = populate_current_row(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) triangle.append(__SCREAMING_SNAKE_CASE ) return triangle def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Optional[int] = [-1] * (current_row_idx + 1) # first and last elements of current row are equal to 1 lowercase_ , lowercase_ : Optional[int] = 1, 1 for current_col_idx in range(1 , __SCREAMING_SNAKE_CASE ): calculate_current_element( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return current_row def lowercase__( __SCREAMING_SNAKE_CASE : list[list[int]] , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , ): lowercase_ : Tuple = triangle[current_row_idx - 1][current_col_idx - 1] lowercase_ : int = triangle[current_row_idx - 1][current_col_idx] lowercase_ : Any = above_to_left_elt + above_to_right_elt def lowercase__( __SCREAMING_SNAKE_CASE : int ): if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError('The input value of \'num_rows\' should be \'int\'' ) if num_rows == 0: return [] elif num_rows < 0: raise ValueError( 'The input value of \'num_rows\' should be greater than or equal to 0' ) lowercase_ : list[list[int]] = [[1]] for row_index in range(1 , __SCREAMING_SNAKE_CASE ): lowercase_ : Optional[int] = [0] + result[-1] + [0] lowercase_ : Dict = row_index + 1 # Calculate the number of distinct elements in a row lowercase_ : Optional[int] = sum(divmod(__SCREAMING_SNAKE_CASE , 2 ) ) lowercase_ : int = [ temp_row[i - 1] + temp_row[i] for i in range(1 , distinct_elements + 1 ) ] lowercase_ : List[Any] = row_first_half[: (row_index + 1) // 2] row_second_half.reverse() lowercase_ : List[Any] = row_first_half + row_second_half result.append(__SCREAMING_SNAKE_CASE ) return result def lowercase__( ): from collections.abc import Callable from timeit import timeit def benchmark_a_function(__SCREAMING_SNAKE_CASE : Callable , __SCREAMING_SNAKE_CASE : int ) -> None: lowercase_ : int = F'''{func.__name__}({value})''' lowercase_ : List[str] = timeit(F'''__main__.{call}''' , setup='import __main__' ) # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds") print(F'''{call:38} -- {timing:.4f} seconds''' ) for value in range(15 ): # (1, 7, 14): for func in (generate_pascal_triangle, generate_pascal_triangle_optimized): benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
321
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
1
"""simple docstring""" from PIL import Image def lowercase__( __SCREAMING_SNAKE_CASE : Image , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Optional[Any] = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level)) def contrast(__SCREAMING_SNAKE_CASE : int ) -> int: return int(1_28 + factor * (c - 1_28) ) return img.point(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": # Load image with Image.open("image_data/lena.jpg") as img: # Change contrast to 170 __SCREAMING_SNAKE_CASE =change_contrast(img, 170) cont_img.save("image_data/lena_high_contrast.png", format="png")
321
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
321
1
"""simple docstring""" import io import os import unicodedata from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ="▁" __SCREAMING_SNAKE_CASE ={"vocab_file": "vocab.txt", "sentencepiece_model_ckpt": "sentencepiece.bpe.model"} __SCREAMING_SNAKE_CASE ={ "sentencepiece_model_file": "sentencepiece.bpe.model", "vocab_file": "vocab.txt", } __SCREAMING_SNAKE_CASE ={ "vocab_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/vocab.txt", }, "sentencepiece_model_file": { "ernie-m-base": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", "ernie-m-large": "https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/sentencepiece.bpe.model", }, } __SCREAMING_SNAKE_CASE ={ "ernie-m-base": 514, "ernie-m-large": 514, } __SCREAMING_SNAKE_CASE ={ "ernie-m-base": {"do_lower_case": False}, "ernie-m-large": {"do_lower_case": False}, } class UpperCamelCase ( lowercase_ ): lowercase = ["input_ids"] lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_INIT_CONFIGURATION lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = RESOURCE_FILES_NAMES def __init__( self ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=False ,__UpperCamelCase="utf8" ,__UpperCamelCase="[UNK]" ,__UpperCamelCase="[SEP]" ,__UpperCamelCase="[PAD]" ,__UpperCamelCase="[CLS]" ,__UpperCamelCase="[MASK]" ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None: '''simple docstring''' lowercase_ : int = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( do_lower_case=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,vocab_file=__UpperCamelCase ,encoding=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCamelCase ,) lowercase_ : str = do_lower_case lowercase_ : Union[str, Any] = sentencepiece_model_ckpt lowercase_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(__UpperCamelCase ) # to mimic paddlenlp.transformers.ernie_m.tokenizer.ErnieMTokenizer functioning if vocab_file is not None: lowercase_ : Union[str, Any] = self.load_vocab(filepath=__UpperCamelCase ) else: lowercase_ : Optional[Any] = {self.sp_model.id_to_piece(__UpperCamelCase ): id for id in range(self.sp_model.get_piece_size() )} lowercase_ : int = {v: k for k, v in self.vocab.items()} def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' if text is None: return None lowercase_ : Union[str, Any] = self.tokenize(__UpperCamelCase ) lowercase_ , lowercase_ : Optional[int] = '', [] for i, ch in enumerate(__UpperCamelCase ): if ch in self.SP_CHAR_MAPPING: lowercase_ : Any = self.SP_CHAR_MAPPING.get(__UpperCamelCase ) else: lowercase_ : Any = unicodedata.normalize('NFKC' ,__UpperCamelCase ) if self.is_whitespace(__UpperCamelCase ): continue normalized_text += ch char_mapping.extend([i] * len(__UpperCamelCase ) ) lowercase_ , lowercase_ , lowercase_ : Optional[Any] = normalized_text, [], 0 if self.do_lower_case: lowercase_ : int = text.lower() for token in split_tokens: if token[:1] == "▁": lowercase_ : Optional[int] = token[1:] lowercase_ : Union[str, Any] = 
text[offset:].index(__UpperCamelCase ) + offset lowercase_ : int = start + len(__UpperCamelCase ) token_mapping.append((char_mapping[start], char_mapping[end - 1] + 1) ) lowercase_ : Dict = end return token_mapping @property def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.vocab ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' return dict(self.vocab ,**self.added_tokens_encoder ) def __getstate__( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.__dict__.copy() lowercase_ : List[str] = None return state def __setstate__( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : int = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): lowercase_ : List[str] = {} lowercase_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.sentencepiece_model_ckpt ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return "".join((self.SP_CHAR_MAPPING.get(__UpperCamelCase ,__UpperCamelCase ) for c in text) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=False ,__UpperCamelCase=64 ,__UpperCamelCase=0.1 ) -> List[Any]: '''simple docstring''' if self.sp_model_kwargs.get('enable_sampling' ) is True: lowercase_ : Optional[int] = True if self.sp_model_kwargs.get('alpha' ) is not None: lowercase_ : List[str] = self.sp_model_kwargs.get('alpha' ) if self.sp_model_kwargs.get('nbest_size' ) is not None: lowercase_ : List[Any] = self.sp_model_kwargs.get('nbest_size' ) if not enable_sampling: lowercase_ : List[Any] = self.sp_model.EncodeAsPieces(__UpperCamelCase ) else: lowercase_ : Union[str, Any] = self.sp_model.SampleEncodeAsPieces(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Union[str, Any] = [] for pi, piece in enumerate(__UpperCamelCase ): if piece == SPIECE_UNDERLINE: if not pieces[pi + 1].startswith(__UpperCamelCase ) and pi != 0: new_pieces.append(__UpperCamelCase ) continue else: continue lowercase_ : Dict = 0 for i, chunk in enumerate(__UpperCamelCase ): if chunk == SPIECE_UNDERLINE: continue if self.is_ch_char(__UpperCamelCase ) or self.is_punct(__UpperCamelCase ): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) new_pieces.append(__UpperCamelCase ) lowercase_ : Optional[Any] = i + 1 elif chunk.isdigit() and i > 0 and not piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowercase_ : Any = i elif not chunk.isdigit() and i > 0 and piece[i - 1].isdigit(): if i > lst_i and piece[lst_i:i] != SPIECE_UNDERLINE: new_pieces.append(piece[lst_i:i] ) lowercase_ : Any = i if len(__UpperCamelCase ) > lst_i: new_pieces.append(piece[lst_i:] ) return new_pieces def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[str] = ''.join(__UpperCamelCase ).replace(__UpperCamelCase ,' ' ).strip() return out_string def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = self.convert_ids_to_tokens(__UpperCamelCase ) lowercase_ : Optional[Any] = ''.join(__UpperCamelCase ).replace(__UpperCamelCase ,' ' ).strip() return out_string def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.vocab.get(__UpperCamelCase ,self.vocab.get(self.unk_token ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' return self.reverse_vocab.get(__UpperCamelCase 
,self.unk_token ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> List[str]: '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] lowercase_ : str = [self.cls_token_id] lowercase_ : Any = [self.sep_token_id] return _cls + token_ids_a + _sep + _sep + token_ids_a + _sep def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ) -> str: '''simple docstring''' if offset_mapping_a is None: return [(0, 0)] + offset_mapping_a + [(0, 0)] return [(0, 0)] + offset_mapping_a + [(0, 0), (0, 0)] + offset_mapping_a + [(0, 0)] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=False ) -> Union[str, Any]: '''simple docstring''' if already_has_special_tokens: if token_ids_a is not None: raise ValueError( 'You should not supply a second sequence if the provided sequence of ' 'ids is already formatted with special tokens for the model.' ) return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a] if token_ids_a is not None: return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase )) + [1] return [1] + ([0] * len(__UpperCamelCase )) + [1] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: # [CLS] X [SEP] return (len(__UpperCamelCase ) + 2) * [0] # [CLS] A [SEP] [SEP] B [SEP] return [0] * (len(__UpperCamelCase ) + 1) + [1] * (len(__UpperCamelCase ) + 3) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' if "\u4e00" <= char <= "\u9fff": return True return False def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' if ("a" <= char <= "z") or ("A" <= char <= "Z"): return True return False def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' if char in ",;:.?!~,;:。?!《》【】": return True return False def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' if char == " " or char == "\t" or char == "\n" or char == "\r": return True if len(__UpperCamelCase ) == 1: lowercase_ : Tuple = unicodedata.category(__UpperCamelCase ) if cat == "Zs": return True return False def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Any = {} with io.open(__UpperCamelCase ,'r' ,encoding='utf-8' ) as f: for index, line in enumerate(__UpperCamelCase ): lowercase_ : List[str] = line.rstrip('\n' ) lowercase_ : int = int(__UpperCamelCase ) return token_to_idx def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Dict = 0 if os.path.isdir(__UpperCamelCase ): lowercase_ : Optional[Any] = os.path.join( __UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) else: lowercase_ : Tuple = (filename_prefix + '-' if filename_prefix else '') + save_directory with open(__UpperCamelCase ,'w' ,encoding='utf-8' ) as writer: for token, token_index in sorted(self.vocab.items() ,key=lambda __UpperCamelCase : kv[1] ): if index != token_index: logger.warning( f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' ' Please check that the vocabulary is not corrupted!' 
) lowercase_ : Optional[Any] = token_index writer.write(token + '\n' ) index += 1 lowercase_ : Any = os.path.join(__UpperCamelCase ,'sentencepiece.bpe.model' ) with open(__UpperCamelCase ,'wb' ) as fi: lowercase_ : Union[str, Any] = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (vocab_file,)
321
"""simple docstring""" import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel from transformers.models.esm.modeling_esm import ( ESM_PRETRAINED_MODEL_ARCHIVE_LIST, EsmEmbeddings, create_position_ids_from_input_ids, ) class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=7 ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=False ,__UpperCamelCase=True ,__UpperCamelCase=33 ,__UpperCamelCase=32 ,__UpperCamelCase=5 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=512 ,__UpperCamelCase=16 ,__UpperCamelCase=2 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=4 ,__UpperCamelCase=None ,) -> List[Any]: '''simple docstring''' lowercase_ : Any = parent lowercase_ : str = batch_size lowercase_ : List[Any] = seq_length lowercase_ : Dict = is_training lowercase_ : Tuple = use_input_mask lowercase_ : Optional[Any] = use_token_type_ids lowercase_ : List[str] = use_labels lowercase_ : Any = vocab_size lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : int = num_attention_heads lowercase_ : int = intermediate_size lowercase_ : List[Any] = hidden_act lowercase_ : Optional[int] = hidden_dropout_prob lowercase_ : Tuple = attention_probs_dropout_prob lowercase_ : Tuple = max_position_embeddings lowercase_ : Optional[int] = type_vocab_size lowercase_ : Optional[int] = type_sequence_label_size lowercase_ : Dict = initializer_range lowercase_ : int = num_labels lowercase_ : Any = num_choices lowercase_ : int = scope def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) lowercase_ : Dict = None if self.use_input_mask: lowercase_ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] ) lowercase_ : Tuple = None lowercase_ : Tuple = None lowercase_ : Tuple = None if self.use_labels: lowercase_ : List[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels ) lowercase_ : int = ids_tensor([self.batch_size] ,self.num_choices ) lowercase_ : str = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' return EsmConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,pad_token_id=1 ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : List[Any] 
= EsmModel(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : Tuple = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Dict = EsmForMaskedLM(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : int = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.num_labels lowercase_ : int = EsmForTokenClassification(config=__UpperCamelCase ) model.to(__UpperCamelCase ) model.eval() lowercase_ : List[Any] = model(__UpperCamelCase ,attention_mask=__UpperCamelCase ,labels=__UpperCamelCase ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Any = self.prepare_config_and_inputs() ( ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ( lowercase_ ) , ) : Optional[int] = config_and_inputs lowercase_ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = False lowercase = ( ( EsmForMaskedLM, EsmModel, EsmForSequenceClassification, EsmForTokenClassification, ) if is_torch_available() else () ) lowercase = () lowercase = ( { 'feature-extraction': EsmModel, 'fill-mask': EsmForMaskedLM, 'text-classification': EsmForSequenceClassification, 'token-classification': EsmForTokenClassification, 'zero-shot': EsmForSequenceClassification, } if is_torch_available() else {} ) lowercase = True def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = EsmModelTester(self ) lowercase_ : List[Any] = ConfigTester(self ,config_class=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' self.config_tester.run_common_tests() def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: lowercase_ : Optional[Any] = type self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_token_classification(*__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> str: '''simple docstring''' for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase_ : List[str] = EsmModel.from_pretrained(__UpperCamelCase ) self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : str = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : Tuple = torch.as_tensor([[12, 31, 13, model.padding_idx]] ) lowercase_ : List[Any] = torch.as_tensor( [ [ 0 + model.padding_idx + 1, 1 + model.padding_idx + 1, 2 + model.padding_idx + 1, model.padding_idx, ] ] ) lowercase_ : Tuple = create_position_ids_from_input_ids(__UpperCamelCase ,model.padding_idx ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs()[0] lowercase_ : List[Any] = EsmEmbeddings(config=__UpperCamelCase ) lowercase_ : List[Any] = torch.empty(2 ,4 ,30 ) lowercase_ : List[str] = [ 0 + embeddings.padding_idx + 1, 1 + embeddings.padding_idx + 1, 2 + embeddings.padding_idx + 1, 3 + embeddings.padding_idx + 1, ] lowercase_ : List[str] = torch.as_tensor([expected_single_positions, expected_single_positions] ) lowercase_ : List[str] = embeddings.create_position_ids_from_inputs_embeds(__UpperCamelCase ) self.assertEqual(position_ids.shape ,expected_positions.shape ) self.assertTrue(torch.all(torch.eq(__UpperCamelCase ,__UpperCamelCase ) ) ) @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' pass @unittest.skip('Esm does not support embedding resizing' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' pass @require_torch class UpperCamelCase ( lowercase_ ): @slow def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : Any = EsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : List[Any] = torch.tensor([[0, 1, 2, 3, 4, 5]] ) lowercase_ : List[str] = model(__UpperCamelCase )[0] lowercase_ : Optional[int] = 33 lowercase_ : Union[str, Any] = torch.Size((1, 6, vocab_size) ) self.assertEqual(output.shape ,__UpperCamelCase ) lowercase_ : List[str] = torch.tensor( [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) ) @slow def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' with torch.no_grad(): lowercase_ : int = EsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' ) model.eval() lowercase_ : Tuple = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) lowercase_ : Dict = model(__UpperCamelCase )[0] # compare the actual values for a slice. lowercase_ : Any = torch.tensor( [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] ,__UpperCamelCase ,atol=1e-4 ) )
321
1
"""simple docstring""" import math def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if initial_intensity < 0: raise ValueError('The value of intensity cannot be negative' ) # handling of negative values of initial intensity if angle < 0 or angle > 3_60: raise ValueError('In Malus Law, the angle is in the range 0-360 degrees' ) # handling of values out of allowed range return initial_intensity * (math.cos(math.radians(__SCREAMING_SNAKE_CASE ) ) ** 2) if __name__ == "__main__": import doctest doctest.testmod(name="malus_law")
321
"""simple docstring""" import pickle import numpy as np from matplotlib import pyplot as plt class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=0.2 ,__UpperCamelCase=0.2 ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = bp_numa lowercase_ : Dict = bp_numa lowercase_ : Tuple = bp_numa lowercase_ : List[Any] = conva_get[:2] lowercase_ : int = conva_get[2] lowercase_ : Dict = size_pa lowercase_ : int = rate_w lowercase_ : Union[str, Any] = rate_t lowercase_ : Dict = [ np.mat(-1 * np.random.rand(self.conva[0] ,self.conva[0] ) + 0.5 ) for i in range(self.conva[1] ) ] lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : Union[str, Any] = np.mat(-1 * np.random.rand(self.num_bpa ,self.num_bpa ) + 0.5 ) lowercase_ : str = -2 * np.random.rand(self.conva[1] ) + 1 lowercase_ : Tuple = -2 * np.random.rand(self.num_bpa ) + 1 lowercase_ : Union[str, Any] = -2 * np.random.rand(self.num_bpa ) + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : int = { 'num_bp1': self.num_bpa, 'num_bp2': self.num_bpa, 'num_bp3': self.num_bpa, 'conv1': self.conva, 'step_conv1': self.step_conva, 'size_pooling1': self.size_poolinga, 'rate_weight': self.rate_weight, 'rate_thre': self.rate_thre, 'w_conv1': self.w_conva, 'wkj': self.wkj, 'vji': self.vji, 'thre_conv1': self.thre_conva, 'thre_bp2': self.thre_bpa, 'thre_bp3': self.thre_bpa, } with open(__UpperCamelCase ,'wb' ) as f: pickle.dump(__UpperCamelCase ,__UpperCamelCase ) print(f'''Model saved: {save_path}''' ) @classmethod def _UpperCAmelCase ( cls ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' with open(__UpperCamelCase ,'rb' ) as f: lowercase_ : Any = pickle.load(__UpperCamelCase ) # noqa: S301 lowercase_ : str = model_dic.get('conv1' ) conv_get.append(model_dic.get('step_conv1' ) ) lowercase_ : Union[str, Any] = model_dic.get('size_pooling1' ) lowercase_ : Optional[Any] = model_dic.get('num_bp1' ) lowercase_ : str = model_dic.get('num_bp2' ) lowercase_ : Optional[Any] = model_dic.get('num_bp3' ) lowercase_ : Union[str, Any] = model_dic.get('rate_weight' ) lowercase_ : Optional[int] = model_dic.get('rate_thre' ) # create model instance lowercase_ : Any = CNN(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) # modify model parameter lowercase_ : Optional[Any] = model_dic.get('w_conv1' ) lowercase_ : Tuple = model_dic.get('wkj' ) lowercase_ : Union[str, Any] = model_dic.get('vji' ) lowercase_ : Optional[Any] = model_dic.get('thre_conv1' ) lowercase_ : Dict = model_dic.get('thre_bp2' ) lowercase_ : Optional[int] = model_dic.get('thre_bp3' ) return conv_ins def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' return 1 / (1 + np.exp(-1 * x )) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return round(__UpperCamelCase ,3 ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Dict = convs[0] lowercase_ : Any = convs[1] lowercase_ : Optional[Any] = np.shape(__UpperCamelCase )[0] # get the data slice of original image data, data_focus lowercase_ : Tuple = [] for i_focus in range(0 ,size_data - size_conv + 1 ,__UpperCamelCase ): for j_focus in range(0 ,size_data - size_conv + 1 
,__UpperCamelCase ): lowercase_ : List[Any] = data[ i_focus : i_focus + size_conv, j_focus : j_focus + size_conv ] data_focus.append(__UpperCamelCase ) # calculate the feature map of every single kernel, and save it as a list of matrices lowercase_ : Dict = [] lowercase_ : Dict = int((size_data - size_conv) / conv_step + 1 ) for i_map in range(__UpperCamelCase ): lowercase_ : Tuple = [] for i_focus in range(len(__UpperCamelCase ) ): lowercase_ : Optional[int] = ( np.sum(np.multiply(data_focus[i_focus] ,w_convs[i_map] ) ) - thre_convs[i_map] ) featuremap.append(self.sig(__UpperCamelCase ) ) lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ).reshape( __UpperCamelCase ,__UpperCamelCase ) data_featuremap.append(__UpperCamelCase ) # expanding the data slice to one dimension lowercase_ : Optional[int] = [] for each_focus in data_focus: focusa_list.extend(self.Expand_Mat(__UpperCamelCase ) ) lowercase_ : str = np.asarray(__UpperCamelCase ) return focus_list, data_featuremap def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase="average_pool" ) -> Tuple: '''simple docstring''' lowercase_ : Union[str, Any] = len(featuremaps[0] ) lowercase_ : str = int(size_map / size_pooling ) lowercase_ : Optional[int] = [] for i_map in range(len(__UpperCamelCase ) ): lowercase_ : int = featuremaps[i_map] lowercase_ : List[str] = [] for i_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j_focus in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[str] = feature_map[ i_focus : i_focus + size_pooling, j_focus : j_focus + size_pooling, ] if pooling_type == "average_pool": # average pooling map_pooled.append(np.average(__UpperCamelCase ) ) elif pooling_type == "max_pooling": # max pooling map_pooled.append(np.max(__UpperCamelCase ) ) lowercase_ : Dict = np.asmatrix(__UpperCamelCase ).reshape(__UpperCamelCase ,__UpperCamelCase ) featuremap_pooled.append(__UpperCamelCase ) return featuremap_pooled def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Any: '''simple docstring''' lowercase_ : Tuple = [] for i in range(len(__UpperCamelCase ) ): lowercase_ : Optional[Any] = np.shape(data[i] ) lowercase_ : List[str] = data[i].reshape(1 ,shapes[0] * shapes[1] ) lowercase_ : List[str] = data_listed.getA().tolist()[0] data_expanded.extend(__UpperCamelCase ) lowercase_ : int = np.asarray(__UpperCamelCase ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Any = np.asarray(__UpperCamelCase ) lowercase_ : Any = np.shape(__UpperCamelCase ) lowercase_ : Optional[Any] = data_mat.reshape(1 ,shapes[0] * shapes[1] ) return data_expanded def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> str: '''simple docstring''' lowercase_ : Any = [] lowercase_ : List[Any] = 0 for i_map in range(__UpperCamelCase ): lowercase_ : List[str] = np.ones((size_map, size_map) ) for i in range(0 ,__UpperCamelCase ,__UpperCamelCase ): for j in range(0 ,__UpperCamelCase ,__UpperCamelCase ): lowercase_ : List[Any] = pd_pool[ i_pool ] lowercase_ : Any = i_pool + 1 lowercase_ : Optional[int] = np.multiply( __UpperCamelCase ,np.multiply(out_map[i_map] ,(1 - out_map[i_map]) ) ) pd_all.append(__UpperCamelCase ) return pd_all def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase=bool ) -> Optional[int]: '''simple docstring''' print('----------------------Start Training-------------------------' ) print((' - 
- Shape: Train_Data ', np.shape(__UpperCamelCase )) ) print((' - - Shape: Teach_Data ', np.shape(__UpperCamelCase )) ) lowercase_ : int = 0 lowercase_ : Tuple = [] lowercase_ : Tuple = 1_0000 while rp < n_repeat and mse >= error_accuracy: lowercase_ : List[str] = 0 print(f'''-------------Learning Time {rp}--------------''' ) for p in range(len(__UpperCamelCase ) ): # print('------------Learning Image: %d--------------'%p) lowercase_ : int = np.asmatrix(datas_train[p] ) lowercase_ : Any = np.asarray(datas_teach[p] ) lowercase_ , lowercase_ : Tuple = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Any = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : Optional[int] = np.shape(__UpperCamelCase ) lowercase_ : Optional[int] = self._expand(__UpperCamelCase ) lowercase_ : int = data_bp_input lowercase_ : Tuple = np.dot(__UpperCamelCase ,self.vji.T ) - self.thre_bpa lowercase_ : Dict = self.sig(__UpperCamelCase ) lowercase_ : int = np.dot(__UpperCamelCase ,self.wkj.T ) - self.thre_bpa lowercase_ : int = self.sig(__UpperCamelCase ) # --------------Model Learning ------------------------ # calculate error and gradient--------------- lowercase_ : str = np.multiply( (data_teach - bp_outa) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Optional[int] = np.multiply( np.dot(__UpperCamelCase ,self.wkj ) ,np.multiply(__UpperCamelCase ,(1 - bp_outa) ) ) lowercase_ : Any = np.dot(__UpperCamelCase ,self.vji ) lowercase_ : str = pd_i_all / (self.size_poolinga * self.size_poolinga) lowercase_ : Dict = pd_conva_pooled.T.getA().tolist() lowercase_ : List[Any] = self._calculate_gradient_from_pool( __UpperCamelCase ,__UpperCamelCase ,shape_featuremapa[0] ,shape_featuremapa[1] ,self.size_poolinga ,) # weight and threshold learning process--------- # convolution layer for k_conv in range(self.conva[1] ): lowercase_ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] ) lowercase_ : Dict = self.rate_weight * np.dot(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : List[Any] = self.w_conva[k_conv] + delta_w.reshape( (self.conva[0], self.conva[0]) ) lowercase_ : Dict = ( self.thre_conva[k_conv] - np.sum(pd_conva_all[k_conv] ) * self.rate_thre ) # all connected layer lowercase_ : Optional[int] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight lowercase_ : Any = self.vji + pd_j_all.T * bp_outa * self.rate_weight lowercase_ : str = self.thre_bpa - pd_k_all * self.rate_thre lowercase_ : Any = self.thre_bpa - pd_j_all * self.rate_thre # calculate the sum of errors over all single images lowercase_ : List[Any] = np.sum(abs(data_teach - bp_outa ) ) error_count += errors # print(' ----Teach ',data_teach) # print(' ----BP_output ',bp_out3) lowercase_ : int = rp + 1 lowercase_ : Union[str, Any] = error_count / patterns all_mse.append(__UpperCamelCase ) def draw_error(): lowercase_ : str = [error_accuracy for i in range(int(n_repeat * 1.2 ) )] plt.plot(__UpperCamelCase ,'+-' ) plt.plot(__UpperCamelCase ,'r--' ) plt.xlabel('Learning Times' ) plt.ylabel('All_mse' ) plt.grid(__UpperCamelCase ,alpha=0.5 ) plt.show() print('------------------Training Completed---------------------' ) print((' - - Training epoch: ', rp, f''' - - Mse: {mse:.6f}''') ) if draw_e: draw_error() return mse def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = [] print('-------------------Start Testing-------------------------' ) print((' - - Shape: Test_Data ', np.shape(__UpperCamelCase )) ) for p 
in range(len(__UpperCamelCase ) ): lowercase_ : List[Any] = np.asmatrix(datas_test[p] ) lowercase_ , lowercase_ : Optional[Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : List[Any] = self.pooling(__UpperCamelCase ,self.size_poolinga ) lowercase_ : List[str] = self._expand(__UpperCamelCase ) lowercase_ : Any = data_bp_input lowercase_ : Optional[Any] = bp_outa * self.vji.T - self.thre_bpa lowercase_ : str = self.sig(__UpperCamelCase ) lowercase_ : List[str] = bp_outa * self.wkj.T - self.thre_bpa lowercase_ : Optional[int] = self.sig(__UpperCamelCase ) produce_out.extend(bp_outa.getA().tolist() ) lowercase_ : List[str] = [list(map(self.do_round ,__UpperCamelCase ) ) for each in produce_out] return np.asarray(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : Optional[int] = np.asmatrix(__UpperCamelCase ) lowercase_ , lowercase_ : Union[str, Any] = self.convolute( __UpperCamelCase ,self.conva ,self.w_conva ,self.thre_conva ,conv_step=self.step_conva ,) lowercase_ : Optional[int] = self.pooling(__UpperCamelCase ,self.size_poolinga ) return data_conveda, data_pooleda if __name__ == "__main__": pass
321
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import AlignProcessor, EfficientNetImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Any = tempfile.mkdtemp() lowercase_ : Optional[int] = [ '[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] lowercase_ : List[Any] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) lowercase_ : Optional[Any] = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : Optional[int] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Any: '''simple docstring''' return BertTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> List[Any]: '''simple docstring''' return BertTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Any: '''simple docstring''' return EfficientNetImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Tuple = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : Optional[Any] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : Optional[int] = self.get_rust_tokenizer() lowercase_ : List[str] = self.get_image_processor() lowercase_ : Tuple = AlignProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : Dict = AlignProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = AlignProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) 
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = AlignProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : Dict = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : List[str] = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : List[Any] = AlignProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : Optional[int] = AlignProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : int = self.prepare_image_inputs() lowercase_ : Optional[int] = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Tuple = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : int = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : Optional[int] = AlignProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Union[str, Any] = processor(text=__UpperCamelCase ) lowercase_ : List[Any] = tokenizer(__UpperCamelCase ,padding='max_length' ,max_length=64 ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : int = self.get_image_processor() lowercase_ : int = self.get_tokenizer() lowercase_ : Union[str, Any] = AlignProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Optional[int] = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Union[str, Any] = self.get_image_processor() lowercase_ : Tuple = self.get_tokenizer() lowercase_ : List[Any] = AlignProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[str] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : str = processor.batch_decode(__UpperCamelCase ) lowercase_ : Union[str, Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : 
List[Any] = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : int = AlignProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Union[str, Any] = self.prepare_image_inputs() lowercase_ : Optional[Any] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
321
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'] ,model_result['ss'] ): lowercase_ : Dict = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = 'sgugger/tiny-distilbert-classification' lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,only_pretrain_model=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : Optional[Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Dict = 'sshleifer/tiny-gpt2' lowercase_ : Tuple = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : str = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Optional[int] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : Any = 'sshleifer/tiny-gpt2' lowercase_ : Any = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : int = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : Dict = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' lowercase_ : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' lowercase_ : Optional[int] = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : str = TensorFlowBenchmark(__UpperCamelCase ,[config] ) lowercase_ : List[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : str = 'patrickvonplaten/t5-tiny-random' lowercase_ : int = AutoConfig.from_pretrained(__UpperCamelCase ) lowercase_ : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ,configs=[config] ) lowercase_ : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices('GPU' ) ) == 0 ,'Cannot do xla on CPU.' 
) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ : Optional[int] = 'sshleifer/tiny-gpt2' lowercase_ : Union[str, Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,training=__UpperCamelCase ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,use_xla=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Union[str, Any] = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : int = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : List[str] = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Any = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,save_to_csv=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,inference_time_csv_file=os.path.join(__UpperCamelCase ,'inf_time.csv' ) ,inference_memory_csv_file=os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ,env_info_csv_file=os.path.join(__UpperCamelCase ,'env.csv' ) ,multi_process=__UpperCamelCase ,) lowercase_ : List[str] = TensorFlowBenchmark(__UpperCamelCase ) benchmark.run() self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'inf_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'env.csv' ) ).exists() ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(__UpperCamelCase ): self.assertTrue(hasattr(__UpperCamelCase ,'sequential' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'cumulative' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'current' ) ) self.assertTrue(hasattr(__UpperCamelCase ,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: lowercase_ : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] ,inference=__UpperCamelCase ,sequence_lengths=[8] ,batch_sizes=[1] ,log_filename=os.path.join(__UpperCamelCase ,'log.txt' ) ,log_print=__UpperCamelCase ,trace_memory_line_by_line=__UpperCamelCase ,eager_mode=__UpperCamelCase ,multi_process=__UpperCamelCase ,) lowercase_ : Dict = TensorFlowBenchmark(__UpperCamelCase ) lowercase_ : Any = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) self.assertTrue(Path(os.path.join(__UpperCamelCase ,'log.txt' ) ).exists() )
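# A minimal sketch of running the same benchmark outside the test harness, assuming
# TensorFlow is installed and the "sshleifer/tiny-gpt2" checkpoint can be downloaded;
# the argument names mirror the TensorFlowBenchmarkArguments calls in the tests above.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

benchmark_args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    inference=True,
    training=False,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,  # single-process keeps stdout readable for a quick check
)
benchmark = TensorFlowBenchmark(benchmark_args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)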
321
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer from ...utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ="▁" __SCREAMING_SNAKE_CASE ={"vocab_file": "sentencepiece.bpe.model"} __SCREAMING_SNAKE_CASE ={ "vocab_file": { "facebook/mbart-large-50-one-to-many-mmt": ( "https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model" ), } } __SCREAMING_SNAKE_CASE ={ "facebook/mbart-large-50-one-to-many-mmt": 1024, } # fmt: off __SCREAMING_SNAKE_CASE =["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", "af_ZA", "az_AZ", "bn_IN", "fa_IR", "he_IL", "hr_HR", "id_ID", "ka_GE", "km_KH", "mk_MK", "ml_IN", "mn_MN", "mr_IN", "pl_PL", "ps_AF", "pt_XX", "sv_SE", "sw_KE", "ta_IN", "te_IN", "th_TH", "tl_XX", "uk_UA", "ur_PK", "xh_ZA", "gl_ES", "sl_SI"] class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = ['input_ids', 'attention_mask'] lowercase = [] lowercase = [] def __init__( self ,__UpperCamelCase ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> None: '''simple docstring''' lowercase_ : Union[str, Any] = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else mask_token lowercase_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs lowercase_ : Dict = kwargs.get('additional_special_tokens' ,[] ) kwargs["additional_special_tokens"] += [ code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"] ] super().__init__( src_lang=__UpperCamelCase ,tgt_lang=__UpperCamelCase ,eos_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,sp_model_kwargs=self.sp_model_kwargs ,**__UpperCamelCase ,) lowercase_ : List[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(__UpperCamelCase ) ) lowercase_ : Optional[Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token lowercase_ : Union[str, Any] = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab lowercase_ : Dict = 1 lowercase_ : Dict = len(self.sp_model ) lowercase_ : Any = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__UpperCamelCase ) } lowercase_ : Optional[int] = {v: k for k, v in self.lang_code_to_id.items()} lowercase_ : List[Any] = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id ) lowercase_ : Dict = {v: k for k, v in self.fairseq_tokens_to_ids.items()} lowercase_ : Tuple = src_lang if src_lang is not None else 'en_XX' lowercase_ : Tuple = self.lang_code_to_id[self._src_lang] lowercase_ : Union[str, Any] = tgt_lang self.set_src_lang_special_tokens(self._src_lang ) @property def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self._src_lang @src_lang.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Any = new_src_lang self.set_src_lang_special_tokens(self._src_lang ) def __getstate__( self ) -> Dict: '''simple docstring''' lowercase_ : str = self.__dict__.copy() lowercase_ : Optional[int] = None return state def __setstate__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Optional[Any] = d # for backward compatibility if not hasattr(self ,'sp_model_kwargs' ): lowercase_ : Optional[Any] = {} lowercase_ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[Any] = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return self.sp_model.encode(__UpperCamelCase ,out_type=__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] lowercase_ : List[Any] = self.sp_model.PieceToId(__UpperCamelCase ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _UpperCAmelCase ( self ,__UpperCamelCase ) -> str: '''simple docstring''' if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' lowercase_ : List[Any] = [] lowercase_ : str = '' lowercase_ : List[Any] = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(__UpperCamelCase ) + token lowercase_ : List[Any] = True lowercase_ : List[str] = [] else: current_sub_tokens.append(__UpperCamelCase ) lowercase_ : List[str] = False out_string += self.sp_model.decode(__UpperCamelCase ) return out_string.strip() def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: 
'''simple docstring''' if not os.path.isdir(__UpperCamelCase ): logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' ) return lowercase_ : List[Any] = os.path.join( __UpperCamelCase ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file ,__UpperCamelCase ) elif not os.path.isfile(self.vocab_file ): with open(__UpperCamelCase ,'wb' ) as fi: lowercase_ : int = self.sp_model.serialized_model_proto() fi.write(__UpperCamelCase ) return (out_vocab_file,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ) -> List[int]: '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__UpperCamelCase ,token_ids_a=__UpperCamelCase ,already_has_special_tokens=__UpperCamelCase ) lowercase_ : str = [1] * len(self.prefix_tokens ) lowercase_ : Union[str, Any] = [1] * len(self.suffix_tokens ) if token_ids_a is None: return prefix_ones + ([0] * len(__UpperCamelCase )) + suffix_ones return prefix_ones + ([0] * len(__UpperCamelCase )) + ([0] * len(__UpperCamelCase )) + suffix_ones def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' if token_ids_a is None: return self.prefix_tokens + token_ids_a + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) -> Tuple: '''simple docstring''' if src_lang is None or tgt_lang is None: raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' ) lowercase_ : Optional[int] = src_lang lowercase_ : Union[str, Any] = self(__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,return_tensors=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : Dict = self.convert_tokens_to_ids(__UpperCamelCase ) lowercase_ : List[str] = tgt_lang_id return inputs def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = "en_XX" ,__UpperCamelCase = None ,__UpperCamelCase = "ro_RO" ,**__UpperCamelCase ,) -> BatchEncoding: '''simple docstring''' lowercase_ : List[Any] = src_lang lowercase_ : Union[str, Any] = tgt_lang return super().prepare_seqaseq_batch(__UpperCamelCase ,__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' return self.set_src_lang_special_tokens(self.src_lang ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' return self.set_tgt_lang_special_tokens(self.tgt_lang ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : Dict = self.lang_code_to_id[src_lang] lowercase_ : Tuple = [self.cur_lang_code_id] lowercase_ : List[Any] = [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = self.lang_code_to_id[tgt_lang] lowercase_ : Any = [self.cur_lang_code_id] lowercase_ : int = [self.eos_token_id]
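# A minimal usage sketch for the tokenizer defined above, assuming it is exposed as
# transformers.MBart50Tokenizer and that the hub checkpoint is reachable; "en_XX" and
# "ro_RO" come from the FAIRSEQ_LANGUAGE_CODES list at the top of the file.
from transformers import MBart50Tokenizer

tokenizer = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
# set_src_lang_special_tokens has already run, so the source language code is
# prefixed and </s> is appended automatically.
encoded = tokenizer("UN Chief Says There Is No Military Solution in Syria")
print(encoded["input_ids"])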
321
"""simple docstring""" from typing import List, Optional, Union import numpy as np from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import PaddingStrategy, TensorType, logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) class UpperCamelCase ( lowercase_ ): lowercase = ['input_values', 'padding_mask'] def __init__( self ,__UpperCamelCase = 1 ,__UpperCamelCase = 2_4000 ,__UpperCamelCase = 0.0 ,__UpperCamelCase = None ,__UpperCamelCase = None ,**__UpperCamelCase ,) -> Any: '''simple docstring''' super().__init__(feature_size=__UpperCamelCase ,sampling_rate=__UpperCamelCase ,padding_value=__UpperCamelCase ,**__UpperCamelCase ) lowercase_ : List[str] = chunk_length_s lowercase_ : Tuple = overlap @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None: return None else: return int(self.chunk_length_s * self.sampling_rate ) @property def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' if self.chunk_length_s is None or self.overlap is None: return None else: return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) ) def __call__( self ,__UpperCamelCase ,__UpperCamelCase = None ,__UpperCamelCase = False ,__UpperCamelCase = None ,__UpperCamelCase = None ,__UpperCamelCase = None ,) -> BatchFeature: '''simple docstring''' if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( f'''The model corresponding to this feature extractor: {self} was trained using a sampling rate of''' f''' {self.sampling_rate}. Please make sure that the provided audio input was sampled with''' f''' {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( 'It is strongly recommended to pass the `sampling_rate` argument to this function. ' 'Failing to do so can result in silent errors that might be hard to debug.' ) if padding and truncation: raise ValueError('Both padding and truncation were set. Make sure you only set one.' 
) elif padding is None: # by default let's pad the inputs lowercase_ : Optional[int] = True lowercase_ : Optional[int] = bool( isinstance(__UpperCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) ) if is_batched: lowercase_ : int = [np.asarray(__UpperCamelCase ,dtype=np.floataa ).T for audio in raw_audio] elif not is_batched and not isinstance(__UpperCamelCase ,np.ndarray ): lowercase_ : Any = np.asarray(__UpperCamelCase ,dtype=np.floataa ) elif isinstance(__UpperCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ): lowercase_ : List[str] = raw_audio.astype(np.floataa ) # always return batch if not is_batched: lowercase_ : Dict = [np.asarray(__UpperCamelCase ).T] # verify inputs are valid for idx, example in enumerate(__UpperCamelCase ): if example.ndim > 2: raise ValueError(f'''Expected input shape (channels, length) but got shape {example.shape}''' ) if self.feature_size == 1 and example.ndim != 1: raise ValueError(f'''Expected mono audio but example has {example.shape[-1]} channels''' ) if self.feature_size == 2 and example.shape[-1] != 2: raise ValueError(f'''Expected stereo audio but example has {example.shape[-1]} channels''' ) lowercase_ : Optional[int] = None lowercase_ : List[Any] = BatchFeature({'input_values': raw_audio} ) if self.chunk_stride is not None and self.chunk_length is not None and max_length is None: if truncation: lowercase_ : List[Any] = min(array.shape[0] for array in raw_audio ) lowercase_ : int = int(np.floor(max_length / self.chunk_stride ) ) lowercase_ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length elif padding: lowercase_ : List[Any] = max(array.shape[0] for array in raw_audio ) lowercase_ : Tuple = int(np.ceil(max_length / self.chunk_stride ) ) lowercase_ : List[str] = (nb_step - 1) * self.chunk_stride + self.chunk_length lowercase_ : Union[str, Any] = 'max_length' else: lowercase_ : int = input_values # normal padding on batch if padded_inputs is None: lowercase_ : int = self.pad( __UpperCamelCase ,max_length=__UpperCamelCase ,truncation=__UpperCamelCase ,padding=__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,) if padding: lowercase_ : Optional[int] = padded_inputs.pop('attention_mask' ) lowercase_ : Dict = [] for example in padded_inputs.pop('input_values' ): if self.feature_size == 1: lowercase_ : Optional[int] = example[..., None] input_values.append(example.T ) lowercase_ : str = input_values if return_tensors is not None: lowercase_ : List[Any] = padded_inputs.convert_to_tensors(__UpperCamelCase ) return padded_inputs
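# A minimal padding sketch for the feature extractor above, assuming it is exposed as
# transformers.EncodecFeatureExtractor; with chunk_length_s left at None, the shorter
# clip should simply be zero-padded to the longest one in the batch.
import numpy as np
from transformers import EncodecFeatureExtractor

feature_extractor = EncodecFeatureExtractor(feature_size=1, sampling_rate=24000)
raw_audio = [
    np.zeros(24000, dtype=np.float32),  # 1 second of silence
    np.zeros(36000, dtype=np.float32),  # 1.5 seconds of silence
]
inputs = feature_extractor(raw_audio, sampling_rate=24000, padding=True, return_tensors="np")
print(sorted(inputs.keys()))         # expected: ['input_values', 'padding_mask']
print(inputs["input_values"].shape)  # expected: (2, 1, 36000) — (batch, channels, length)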
321
1
"""simple docstring""" import numpy as np def lowercase__( __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : float = 1E-12 , __SCREAMING_SNAKE_CASE : int = 1_00 , ): assert np.shape(__SCREAMING_SNAKE_CASE )[0] == np.shape(__SCREAMING_SNAKE_CASE )[1] # Ensure proper dimensionality. assert np.shape(__SCREAMING_SNAKE_CASE )[0] == np.shape(__SCREAMING_SNAKE_CASE )[0] # Ensure inputs are either both complex or both real assert np.iscomplexobj(__SCREAMING_SNAKE_CASE ) == np.iscomplexobj(__SCREAMING_SNAKE_CASE ) lowercase_ : List[str] = np.iscomplexobj(__SCREAMING_SNAKE_CASE ) if is_complex: # Ensure complex input_matrix is Hermitian assert np.array_equal(__SCREAMING_SNAKE_CASE , input_matrix.conj().T ) # Set convergence to False. Will define convergence when we exceed max_iterations # or when we have small changes from one iteration to next. lowercase_ : Union[str, Any] = False lowercase_ : int = 0 lowercase_ : Dict = 0 lowercase_ : Tuple = 1E12 while not convergence: # Multiple matrix by the vector. lowercase_ : str = np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Normalize the resulting output vector. lowercase_ : Union[str, Any] = w / np.linalg.norm(__SCREAMING_SNAKE_CASE ) # Find rayleigh quotient # (faster than usual b/c we know vector is normalized already) lowercase_ : Union[str, Any] = vector.conj().T if is_complex else vector.T lowercase_ : Union[str, Any] = np.dot(__SCREAMING_SNAKE_CASE , np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) # Check convergence. lowercase_ : Optional[int] = np.abs(lambda_ - lambda_previous ) / lambda_ iterations += 1 if error <= error_tol or iterations >= max_iterations: lowercase_ : Union[str, Any] = True lowercase_ : Optional[int] = lambda_ if is_complex: lowercase_ : List[Any] = np.real(lambda_ ) return lambda_, vector def lowercase__( ): lowercase_ : Tuple = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] ) lowercase_ : List[str] = np.array([41, 4, 20] ) lowercase_ : Any = real_input_matrix.astype(np.complexaaa ) lowercase_ : Union[str, Any] = np.triu(1J * complex_input_matrix , 1 ) complex_input_matrix += imag_matrix complex_input_matrix += -1 * imag_matrix.T lowercase_ : str = np.array([41, 4, 20] ).astype(np.complexaaa ) for problem_type in ["real", "complex"]: if problem_type == "real": lowercase_ : Optional[int] = real_input_matrix lowercase_ : str = real_vector elif problem_type == "complex": lowercase_ : Optional[int] = complex_input_matrix lowercase_ : Optional[Any] = complex_vector # Our implementation. lowercase_ , lowercase_ : Optional[Any] = power_iteration(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Numpy implementation. # Get eigenvalues and eigenvectors using built-in numpy # eigh (eigh used for symmetric or hermetian matrices). lowercase_ , lowercase_ : List[Any] = np.linalg.eigh(__SCREAMING_SNAKE_CASE ) # Last eigenvalue is the maximum one. lowercase_ : List[Any] = eigen_values[-1] # Last column in this matrix is eigenvector corresponding to largest eigenvalue. lowercase_ : Optional[int] = eigen_vectors[:, -1] # Check our implementation and numpy gives close answers. assert np.abs(eigen_value - eigen_value_max ) <= 1E-6 # Take absolute values element wise of each eigenvector. # as they are only unique to a minus sign. assert np.linalg.norm(np.abs(__SCREAMING_SNAKE_CASE ) - np.abs(__SCREAMING_SNAKE_CASE ) ) <= 1E-6 if __name__ == "__main__": import doctest doctest.testmod() test_power_iteration()
321
"""simple docstring""" # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __SCREAMING_SNAKE_CASE ={"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __SCREAMING_SNAKE_CASE =[ "MRA_PRETRAINED_MODEL_ARCHIVE_LIST", "MraForMaskedLM", "MraForMultipleChoice", "MraForQuestionAnswering", "MraForSequenceClassification", "MraForTokenClassification", "MraLayer", "MraModel", "MraPreTrainedModel", ] if TYPE_CHECKING: from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mra import ( MRA_PRETRAINED_MODEL_ARCHIVE_LIST, MraForMaskedLM, MraForMultipleChoice, MraForQuestionAnswering, MraForSequenceClassification, MraForTokenClassification, MraLayer, MraModel, MraPreTrainedModel, ) else: import sys __SCREAMING_SNAKE_CASE =_LazyModule(__name__, globals()["__file__"], _import_structure)
321
1
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
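# Invocation sketch for the conversion script above; the file name
# convert_dit_checkpoint.py is an assumption, but the flags match the argparse
# definitions, and --checkpoint_url already defaults to the dit-base checkpoint:
#
#   python convert_dit_checkpoint.py \
#       --pytorch_dump_folder_path ./dit-base \
#       --push_to_hub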
321
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any=False , __SCREAMING_SNAKE_CASE : Dict=False ): lowercase_ : int = 'backbone.' if is_semantic else '' lowercase_ : List[str] = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''{prefix}blocks.{i}.norm1.weight''', F'''beit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm1.bias''', F'''beit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.weight''', F'''beit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append( (F'''{prefix}blocks.{i}.attn.proj.bias''', F'''beit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.weight''', F'''beit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.norm2.bias''', F'''beit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.weight''', F'''beit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc1.bias''', F'''beit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.weight''', F'''beit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''{prefix}blocks.{i}.mlp.fc2.bias''', F'''beit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ (F'''{prefix}cls_token''', 'beit.embeddings.cls_token'), (F'''{prefix}patch_embed.proj.weight''', 'beit.embeddings.patch_embeddings.projection.weight'), (F'''{prefix}patch_embed.proj.bias''', 'beit.embeddings.patch_embeddings.projection.bias'), (F'''{prefix}pos_embed''', 'beit.embeddings.position_embeddings'), ] ) if has_lm_head: # mask token + layernorm rename_keys.extend( [ ('mask_token', 'beit.embeddings.mask_token'), ('norm.weight', 'layernorm.weight'), ('norm.bias', 'layernorm.bias'), ] ) else: # layernorm + classification head rename_keys.extend( [ ('fc_norm.weight', 'beit.pooler.layernorm.weight'), ('fc_norm.bias', 'beit.pooler.layernorm.bias'), ('head.weight', 'classifier.weight'), ('head.bias', 'classifier.bias'), ] ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[str]=False , __SCREAMING_SNAKE_CASE : List[Any]=False ): for i in range(config.num_hidden_layers ): lowercase_ : Any = 'backbone.' 
if is_semantic else '' # queries, keys and values lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.qkv.weight''' ) lowercase_ : List[Any] = state_dict.pop(F'''{prefix}blocks.{i}.attn.q_bias''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.attn.v_bias''' ) lowercase_ : List[str] = in_proj_weight[ : config.hidden_size, : ] lowercase_ : List[str] = q_bias lowercase_ : List[str] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowercase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowercase_ : Any = v_bias # gamma_1 and gamma_2 # we call them lambda because otherwise they are renamed when using .from_pretrained lowercase_ : Any = state_dict.pop(F'''{prefix}blocks.{i}.gamma_1''' ) lowercase_ : int = state_dict.pop(F'''{prefix}blocks.{i}.gamma_2''' ) lowercase_ : Tuple = gamma_a lowercase_ : List[Any] = gamma_a def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Any = val def lowercase__( ): lowercase_ : List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg' lowercase_ : Any = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any=False ): lowercase_ : List[str] = False if 'rvlcdip' in checkpoint_url else True lowercase_ : Dict = BeitConfig(use_absolute_position_embeddings=__SCREAMING_SNAKE_CASE , use_mask_token=__SCREAMING_SNAKE_CASE ) # size of the architecture if "large" in checkpoint_url or "dit-l" in checkpoint_url: lowercase_ : Any = 10_24 lowercase_ : List[str] = 40_96 lowercase_ : Tuple = 24 lowercase_ : Union[str, Any] = 16 # labels if "rvlcdip" in checkpoint_url: lowercase_ : Optional[Any] = 16 lowercase_ : Any = 'huggingface/label-files' lowercase_ : int = 'rvlcdip-id2label.json' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Dict = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : str = idalabel lowercase_ : str = {v: k for k, v in idalabel.items()} # load state_dict of original model, remove and rename some keys lowercase_ : Dict = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , map_location='cpu' )['model'] lowercase_ : Optional[Any] = create_rename_keys(__SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , has_lm_head=__SCREAMING_SNAKE_CASE ) # load HuggingFace model lowercase_ : Optional[int] = BeitForMaskedImageModeling(__SCREAMING_SNAKE_CASE ) if has_lm_head else BeitForImageClassification(__SCREAMING_SNAKE_CASE ) model.eval() model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image lowercase_ : List[Any] = BeitImageProcessor( size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=__SCREAMING_SNAKE_CASE ) lowercase_ : str = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors='pt' ) lowercase_ : int = encoding['pixel_values'] lowercase_ : Any = model(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = outputs.logits # verify logits lowercase_ : Optional[Any] = [1, 16] 
if 'rvlcdip' in checkpoint_url else [1, 1_96, 81_92] assert logits.shape == torch.Size(__SCREAMING_SNAKE_CASE ), "Shape of logits not as expected" Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: if has_lm_head: lowercase_ : List[str] = 'dit-base' if 'base' in checkpoint_url else 'dit-large' else: lowercase_ : List[str] = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip' image_processor.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) model.push_to_hub( repo_path_or_name=Path(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=__SCREAMING_SNAKE_CASE , ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() parser.add_argument( "--checkpoint_url", default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth", type=str, help="URL to the original PyTorch checkpoint (.pth file).", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model." ) parser.add_argument( "--push_to_hub", action="store_true", ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
321
1
"""simple docstring""" from __future__ import annotations import copy import inspect import json import math import os import tempfile import unittest from importlib import import_module import numpy as np from transformers import ViTMAEConfig from transformers.file_utils import cached_property, is_tf_available, is_vision_available from transformers.testing_utils import require_tf, require_vision, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFViTMAEForPreTraining, TFViTMAEModel if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase=13 ,__UpperCamelCase=30 ,__UpperCamelCase=2 ,__UpperCamelCase=3 ,__UpperCamelCase=True ,__UpperCamelCase=True ,__UpperCamelCase=32 ,__UpperCamelCase=2 ,__UpperCamelCase=4 ,__UpperCamelCase=37 ,__UpperCamelCase="gelu" ,__UpperCamelCase=0.1 ,__UpperCamelCase=0.1 ,__UpperCamelCase=10 ,__UpperCamelCase=0.02 ,__UpperCamelCase=3 ,__UpperCamelCase=0.6 ,__UpperCamelCase=None ,) -> Any: '''simple docstring''' lowercase_ : Optional[int] = parent lowercase_ : Union[str, Any] = batch_size lowercase_ : str = image_size lowercase_ : str = patch_size lowercase_ : int = num_channels lowercase_ : Tuple = is_training lowercase_ : Optional[int] = use_labels lowercase_ : List[str] = hidden_size lowercase_ : Optional[int] = num_hidden_layers lowercase_ : Optional[Any] = num_attention_heads lowercase_ : Dict = intermediate_size lowercase_ : int = hidden_act lowercase_ : List[str] = hidden_dropout_prob lowercase_ : str = attention_probs_dropout_prob lowercase_ : List[Any] = type_sequence_label_size lowercase_ : int = initializer_range lowercase_ : Any = mask_ratio lowercase_ : Optional[int] = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) lowercase_ : List[str] = (image_size // patch_size) ** 2 lowercase_ : Union[str, Any] = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase_ : Tuple = None if self.use_labels: lowercase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase_ : List[Any] = self.get_config() return config, pixel_values, labels def _UpperCAmelCase ( self ) -> int: '''simple docstring''' return ViTMAEConfig( image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,decoder_hidden_size=self.hidden_size ,decoder_num_hidden_layers=self.num_hidden_layers ,decoder_num_attention_heads=self.num_attention_heads ,decoder_intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__UpperCamelCase ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : List[str] = 
TFViTMAEModel(config=__UpperCamelCase ) lowercase_ : str = model(__UpperCamelCase ,training=__UpperCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' lowercase_ : str = TFViTMAEForPreTraining(__UpperCamelCase ) lowercase_ : List[str] = model(__UpperCamelCase ,training=__UpperCamelCase ) # expected sequence length = num_patches lowercase_ : List[str] = (self.image_size // self.patch_size) ** 2 lowercase_ : List[Any] = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) # test greyscale images lowercase_ : Optional[Any] = 1 lowercase_ : List[Any] = TFViTMAEForPreTraining(__UpperCamelCase ) lowercase_ : Optional[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) lowercase_ : Tuple = model(__UpperCamelCase ,training=__UpperCamelCase ) lowercase_ : str = self.patch_size**2 self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Optional[int] = self.prepare_config_and_inputs() ((lowercase_) , (lowercase_) , (lowercase_)) : int = config_and_inputs lowercase_ : List[str] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class UpperCamelCase ( lowercase_ , lowercase_ , unittest.TestCase ): lowercase = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else () lowercase = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {} lowercase = False lowercase = False lowercase = False lowercase = False def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Tuple = TFViTMAEModelTester(self ) lowercase_ : int = ConfigTester(self ,config_class=__UpperCamelCase ,has_text_modality=__UpperCamelCase ,hidden_size=37 ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason='ViTMAE does not use inputs_embeds' ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' pass def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ , lowercase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : str = model_class(__UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() ,(tf.keras.layers.Layer) ) lowercase_ : Optional[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__UpperCamelCase ,tf.keras.layers.Layer ) ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase_ : List[str] = model_class(__UpperCamelCase ) lowercase_ : Any = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase_ : Union[str, Any] = [*signature.parameters.keys()] lowercase_ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' 
lowercase_ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' np.random.seed(2 ) lowercase_ , lowercase_ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowercase_ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase_ : Any = model_class(__UpperCamelCase ) lowercase_ : List[str] = self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Union[str, Any] = model(__UpperCamelCase ,noise=__UpperCamelCase ) lowercase_ : Any = copy.deepcopy(self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) ) lowercase_ : Tuple = model(**__UpperCamelCase ,noise=__UpperCamelCase ) lowercase_ : List[Any] = outputs_dict[0].numpy() lowercase_ : int = outputs_keywords[0].numpy() self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) ,1e-6 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' np.random.seed(2 ) lowercase_ , lowercase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Optional[Any] = int((config.image_size // config.patch_size) ** 2 ) lowercase_ : Any = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) def prepare_numpy_arrays(__UpperCamelCase ): lowercase_ : Optional[int] = {} for k, v in inputs_dict.items(): if tf.is_tensor(__UpperCamelCase ): lowercase_ : Dict = v.numpy() else: lowercase_ : str = np.array(__UpperCamelCase ) return inputs_np_dict for model_class in self.all_model_classes: lowercase_ : Optional[int] = model_class(__UpperCamelCase ) lowercase_ : List[str] = self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Tuple = prepare_numpy_arrays(__UpperCamelCase ) lowercase_ : Any = model(__UpperCamelCase ,noise=__UpperCamelCase ) lowercase_ : Dict = model(**__UpperCamelCase ,noise=__UpperCamelCase ) self.assert_outputs_same(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]: '''simple docstring''' np.random.seed(2 ) lowercase_ : Optional[Any] = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 ) lowercase_ : List[Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase_ : str = tf.constant(__UpperCamelCase ) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument lowercase_ : List[Any] = tf_noise super().check_pt_tf_models(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> int: '''simple docstring''' np.random.seed(2 ) lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : List[Any] = { module_member for model_class in self.all_model_classes for module in (import_module(model_class.__module__ ),) for module_member_name in dir(__UpperCamelCase ) if module_member_name.endswith('MainLayer' ) # This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`. 
and module_member_name[: -len('MainLayer' )] == model_class.__name__[: -len('Model' )] for module_member in (getattr(__UpperCamelCase ,__UpperCamelCase ),) if isinstance(__UpperCamelCase ,__UpperCamelCase ) and tf.keras.layers.Layer in module_member.__bases__ and getattr(__UpperCamelCase ,'_keras_serializable' ,__UpperCamelCase ) } lowercase_ : Tuple = int((config.image_size // config.patch_size) ** 2 ) lowercase_ : str = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) lowercase_ : Union[str, Any] = tf.convert_to_tensor(__UpperCamelCase ) inputs_dict.update({'noise': noise} ) for main_layer_class in tf_main_layer_classes: lowercase_ : Any = main_layer_class(__UpperCamelCase ) lowercase_ : List[Any] = { name: tf.keras.Input(tensor.shape[1:] ,dtype=tensor.dtype ) for name, tensor in inputs_dict.items() } lowercase_ : Optional[Any] = tf.keras.Model(__UpperCamelCase ,outputs=main_layer(__UpperCamelCase ) ) lowercase_ : Tuple = model(__UpperCamelCase ) with tempfile.TemporaryDirectory() as tmpdirname: lowercase_ : List[Any] = os.path.join(__UpperCamelCase ,'keras_model.h5' ) model.save(__UpperCamelCase ) lowercase_ : Union[str, Any] = tf.keras.models.load_model( __UpperCamelCase ,custom_objects={main_layer_class.__name__: main_layer_class} ) assert isinstance(__UpperCamelCase ,tf.keras.Model ) lowercase_ : List[Any] = model(__UpperCamelCase ) self.assert_outputs_same(__UpperCamelCase ,__UpperCamelCase ) @slow def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' np.random.seed(2 ) lowercase_ , lowercase_ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : Any = int((config.image_size // config.patch_size) ** 2 ) lowercase_ : Dict = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase_ : Dict = model_class(__UpperCamelCase ) lowercase_ : Tuple = self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : int = model(__UpperCamelCase ,noise=__UpperCamelCase ) if model_class.__name__ == "TFViTMAEModel": lowercase_ : Dict = outputs.last_hidden_state.numpy() lowercase_ : Union[str, Any] = 0 else: lowercase_ : List[Any] = outputs.logits.numpy() lowercase_ : Optional[int] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(__UpperCamelCase ,saved_model=__UpperCamelCase ) lowercase_ : str = model_class.from_pretrained(__UpperCamelCase ) lowercase_ : str = model(__UpperCamelCase ,noise=__UpperCamelCase ) if model_class.__name__ == "TFViTMAEModel": lowercase_ : List[str] = after_outputs['last_hidden_state'].numpy() lowercase_ : Union[str, Any] = 0 else: lowercase_ : int = after_outputs['logits'].numpy() lowercase_ : Union[str, Any] = 0 lowercase_ : Tuple = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(__UpperCamelCase ,1e-5 ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' np.random.seed(2 ) lowercase_ , lowercase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() lowercase_ : List[str] = int((config.image_size // config.patch_size) ** 2 ) lowercase_ : Union[str, Any] = np.random.uniform(size=(self.model_tester.batch_size, num_patches) ) for model_class in self.all_model_classes: lowercase_ : int = model_class(__UpperCamelCase ) lowercase_ : Union[str, Any] = self._prepare_for_class(__UpperCamelCase ,__UpperCamelCase ) lowercase_ : Dict = model(__UpperCamelCase ,noise=__UpperCamelCase ) lowercase_ : Optional[int] = model.get_config() # make sure that returned config is jsonifiable, which is 
required by keras json.dumps(__UpperCamelCase ) lowercase_ : Union[str, Any] = model_class.from_config(model.get_config() ) # make sure it also accepts a normal config lowercase_ : str = model_class.from_config(model.config ) lowercase_ : List[str] = new_model(__UpperCamelCase ) # Build model new_model.set_weights(model.get_weights() ) lowercase_ : Tuple = new_model(__UpperCamelCase ,noise=__UpperCamelCase ) self.assert_outputs_same(__UpperCamelCase ,__UpperCamelCase ) @unittest.skip( reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results.' ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' pass @unittest.skip(reason='ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load' ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' pass @slow def _UpperCAmelCase ( self ) -> int: '''simple docstring''' lowercase_ : int = TFViTMAEModel.from_pretrained('google/vit-base-patch16-224' ) self.assertIsNotNone(__UpperCamelCase ) def lowercase__( ): lowercase_ : str = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class UpperCamelCase ( unittest.TestCase ): @cached_property def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' return ViTImageProcessor.from_pretrained('facebook/vit-mae-base' ) if is_vision_available() else None @slow def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' np.random.seed(2 ) lowercase_ : List[Any] = TFViTMAEForPreTraining.from_pretrained('facebook/vit-mae-base' ) lowercase_ : Optional[int] = self.default_image_processor lowercase_ : List[str] = prepare_img() lowercase_ : Optional[Any] = image_processor(images=__UpperCamelCase ,return_tensors='tf' ) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) lowercase_ : Any = ViTMAEConfig() lowercase_ : List[str] = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 ) lowercase_ : Optional[int] = np.random.uniform(size=(1, num_patches) ) # forward pass lowercase_ : str = model(**__UpperCamelCase ,noise=__UpperCamelCase ) # verify the logits lowercase_ : Optional[Any] = tf.convert_to_tensor([1, 196, 768] ) self.assertEqual(outputs.logits.shape ,__UpperCamelCase ) lowercase_ : Dict = tf.convert_to_tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) tf.debugging.assert_near(outputs.logits[0, :3, :3] ,__UpperCamelCase ,atol=1e-4 )
321
"""simple docstring""" __SCREAMING_SNAKE_CASE ={ "a": "AAAAA", "b": "AAAAB", "c": "AAABA", "d": "AAABB", "e": "AABAA", "f": "AABAB", "g": "AABBA", "h": "AABBB", "i": "ABAAA", "j": "BBBAA", "k": "ABAAB", "l": "ABABA", "m": "ABABB", "n": "ABBAA", "o": "ABBAB", "p": "ABBBA", "q": "ABBBB", "r": "BAAAA", "s": "BAAAB", "t": "BAABA", "u": "BAABB", "v": "BBBAB", "w": "BABAA", "x": "BABAB", "y": "BABBA", "z": "BABBB", " ": " ", } __SCREAMING_SNAKE_CASE ={value: key for key, value in encode_dict.items()} def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : Union[str, Any] = '' for letter in word.lower(): if letter.isalpha() or letter == " ": encoded += encode_dict[letter] else: raise Exception('encode() accepts only letters of the alphabet and spaces' ) return encoded def lowercase__( __SCREAMING_SNAKE_CASE : str ): if set(__SCREAMING_SNAKE_CASE ) - {"A", "B", " "} != set(): raise Exception('decode() accepts only \'A\', \'B\' and spaces' ) lowercase_ : Dict = '' for word in coded.split(): while len(__SCREAMING_SNAKE_CASE ) != 0: decoded += decode_dict[word[:5]] lowercase_ : Any = word[5:] decoded += " " return decoded.strip() if __name__ == "__main__": from doctest import testmod testmod()
321
1
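The Baconian-cipher sample above maps each letter to a fixed five-character A/B group (with nonstandard codes for i/j and u/v so the table stays unique) and decodes by consuming five characters at a time. A minimal round-trip sketch of the same idea, with illustrative names and a deliberately truncated table:

# Round-trip sketch for the Baconian cipher above (illustrative names, truncated table).
encode_dict = {"a": "AAAAA", "b": "AAAAB", " ": " "}
decode_dict = {v: k for k, v in encode_dict.items()}

def encode(word: str) -> str:
    return "".join(encode_dict[ch] for ch in word.lower())

def decode(coded: str) -> str:
    words = []
    for group in coded.split():
        words.append("".join(decode_dict[group[i : i + 5]] for i in range(0, len(group), 5)))
    return " ".join(words)

assert decode(encode("ab ba")) == "ab ba"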
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = hex_num.strip() if not hex_num: raise ValueError('No value was passed to the function' ) lowercase_ : Dict = hex_num[0] == '-' if is_negative: lowercase_ : str = hex_num[1:] try: lowercase_ : Any = int(__SCREAMING_SNAKE_CASE , 16 ) except ValueError: raise ValueError('Invalid value was passed to the function' ) lowercase_ : List[Any] = '' while int_num > 0: lowercase_ : Optional[int] = str(int_num % 2 ) + bin_str int_num >>= 1 return int(('-' + bin_str) if is_negative else bin_str ) if __name__ == "__main__": import doctest doctest.testmod()
321
"""simple docstring""" def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations(__SCREAMING_SNAKE_CASE : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(__SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): def count_of_possible_combinations_with_dp_array( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] lowercase_ : str = sum( count_of_possible_combinations_with_dp_array(target - item , __SCREAMING_SNAKE_CASE ) for item in array ) lowercase_ : Tuple = answer return answer lowercase_ : Optional[Any] = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : list[int] , __SCREAMING_SNAKE_CASE : int ): lowercase_ : Dict = [0] * (target + 1) lowercase_ : Dict = 1 for i in range(1 , target + 1 ): for j in range(__SCREAMING_SNAKE_CASE ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() __SCREAMING_SNAKE_CASE =3 __SCREAMING_SNAKE_CASE =5 __SCREAMING_SNAKE_CASE =[1, 2, 5] print(combination_sum_iv(n, array, target))
321
1
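The three combination-sum-IV strategies in the sample above (plain recursion, memoized recursion, bottom-up DP) count ordered ways to reach the target and should agree. A quick cross-check of two of them, as a sketch with illustrative names:

# Plain recursion vs. bottom-up DP for combination sum IV (illustrative names).
def count_plain(array, target):
    if target < 0:
        return 0
    if target == 0:
        return 1
    return sum(count_plain(array, target - item) for item in array)

def count_dp(array, target):
    dp = [0] * (target + 1)
    dp[0] = 1  # one way to reach 0: the empty combination
    for total in range(1, target + 1):
        for item in array:
            if total - item >= 0:
                dp[total] += dp[total - item]
    return dp[target]

# For array [1, 2, 5] and target 5 there are 9 ordered combinations.
assert count_plain([1, 2, 5], 5) == count_dp([1, 2, 5], 5) == 9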
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore __SCREAMING_SNAKE_CASE =namedtuple("covid_data", "cases deaths recovered") def lowercase__( __SCREAMING_SNAKE_CASE : str = "https://www.worldometers.info/coronavirus/" ): lowercase_ : Union[str, Any] = '//div[@class = "maincounter-number"]/span/text()' return covid_data(*html.fromstring(requests.get(__SCREAMING_SNAKE_CASE ).content ).xpath(__SCREAMING_SNAKE_CASE ) ) __SCREAMING_SNAKE_CASE ="Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
321
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> None: '''simple docstring''' lowercase_ : int = set_counts lowercase_ : List[Any] = max(__UpperCamelCase ) lowercase_ : Union[str, Any] = len(__UpperCamelCase ) lowercase_ : Dict = [1] * num_sets lowercase_ : Optional[int] = list(range(__UpperCamelCase ) ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> bool: '''simple docstring''' lowercase_ : Optional[int] = self.get_parent(__UpperCamelCase ) lowercase_ : int = self.get_parent(__UpperCamelCase ) if src_parent == dst_parent: return False if self.ranks[dst_parent] >= self.ranks[src_parent]: self.set_counts[dst_parent] += self.set_counts[src_parent] lowercase_ : Tuple = 0 lowercase_ : str = dst_parent if self.ranks[dst_parent] == self.ranks[src_parent]: self.ranks[dst_parent] += 1 lowercase_ : Union[str, Any] = self.set_counts[dst_parent] else: self.set_counts[src_parent] += self.set_counts[dst_parent] lowercase_ : str = 0 lowercase_ : Tuple = src_parent lowercase_ : int = self.set_counts[src_parent] lowercase_ : str = max(self.max_set ,__UpperCamelCase ) return True def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' if self.parents[disj_set] == disj_set: return disj_set lowercase_ : Union[str, Any] = self.get_parent(self.parents[disj_set] ) return self.parents[disj_set]
321
1
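The disjoint-set sample above performs union by rank while tracking per-set element counts and the running maximum set size. A deobfuscated sketch of that bookkeeping (names are illustrative, and merge is restructured slightly via a root swap):

class DisjointSet:
    # Union-by-rank with per-set counts, mirroring the class above.
    def __init__(self, set_counts):
        self.set_counts = list(set_counts)
        self.max_set = max(set_counts)
        self.ranks = [1] * len(set_counts)
        self.parents = list(range(len(set_counts)))

    def find(self, i):
        # Path compression: point the node directly at its root.
        if self.parents[i] != i:
            self.parents[i] = self.find(self.parents[i])
        return self.parents[i]

    def merge(self, src, dst):
        src_root, dst_root = self.find(src), self.find(dst)
        if src_root == dst_root:
            return False
        if self.ranks[src_root] > self.ranks[dst_root]:
            src_root, dst_root = dst_root, src_root  # keep the higher-rank root as dst_root
        if self.ranks[src_root] == self.ranks[dst_root]:
            self.ranks[dst_root] += 1
        self.set_counts[dst_root] += self.set_counts[src_root]
        self.set_counts[src_root] = 0
        self.parents[src_root] = dst_root
        self.max_set = max(self.max_set, self.set_counts[dst_root])
        return True

ds = DisjointSet([1, 1, 1])
ds.merge(0, 1)
ds.merge(1, 2)
assert ds.max_set == 3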
"""simple docstring""" import math from datetime import datetime, timedelta def lowercase__( __SCREAMING_SNAKE_CASE : int ): lowercase_ : int = year % 19 lowercase_ : List[Any] = year % 4 lowercase_ : List[str] = year % 7 lowercase_ : str = math.floor(year / 1_00 ) lowercase_ : Any = math.floor((13 + 8 * leap_day_inhibits) / 25 ) lowercase_ : Optional[int] = leap_day_inhibits / 4 lowercase_ : str = ( 15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number ) % 30 lowercase_ : List[Any] = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7 # days to be added to March 21 lowercase_ : str = (19 * metonic_cycle + secular_moon_shift) % 30 # PHM -> Paschal Full Moon lowercase_ : List[str] = ( 2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point ) % 7 if days_to_add == 29 and days_from_phm_to_sunday == 6: return datetime(__SCREAMING_SNAKE_CASE , 4 , 19 ) elif days_to_add == 28 and days_from_phm_to_sunday == 6: return datetime(__SCREAMING_SNAKE_CASE , 4 , 18 ) else: return datetime(__SCREAMING_SNAKE_CASE , 3 , 22 ) + timedelta( days=int(days_to_add + days_from_phm_to_sunday ) ) if __name__ == "__main__": for year in (1994, 2000, 2010, 2021, 2023): __SCREAMING_SNAKE_CASE ="will be" if year > datetime.now().year else "was" print(F"Easter in {year} {tense} {gauss_easter(year)}")
321
"""simple docstring""" import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers, processors from ...tokenization_utils_base import AddedToken, BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_blenderbot import BlenderbotTokenizer if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) __SCREAMING_SNAKE_CASE ={ "vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_config_file": "tokenizer_config.json", } __SCREAMING_SNAKE_CASE ={ "vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"}, "merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"}, "tokenizer_config_file": { "facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json" }, } __SCREAMING_SNAKE_CASE ={"facebook/blenderbot-3B": 128} class UpperCamelCase ( lowercase_ ): lowercase = VOCAB_FILES_NAMES lowercase = PRETRAINED_VOCAB_FILES_MAP lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowercase = ['input_ids', 'attention_mask'] lowercase = BlenderbotTokenizer def __init__( self ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase=None ,__UpperCamelCase="replace" ,__UpperCamelCase="<s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="</s>" ,__UpperCamelCase="<s>" ,__UpperCamelCase="<unk>" ,__UpperCamelCase="<pad>" ,__UpperCamelCase="<mask>" ,__UpperCamelCase=False ,__UpperCamelCase=True ,**__UpperCamelCase ,) -> Optional[int]: '''simple docstring''' super().__init__( __UpperCamelCase ,__UpperCamelCase ,tokenizer_file=__UpperCamelCase ,errors=__UpperCamelCase ,bos_token=__UpperCamelCase ,eos_token=__UpperCamelCase ,sep_token=__UpperCamelCase ,cls_token=__UpperCamelCase ,unk_token=__UpperCamelCase ,pad_token=__UpperCamelCase ,mask_token=__UpperCamelCase ,add_prefix_space=__UpperCamelCase ,trim_offsets=__UpperCamelCase ,**__UpperCamelCase ,) lowercase_ : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,pre_tok_state.pop('type' ) ) lowercase_ : Any = add_prefix_space lowercase_ : Tuple = pre_tok_class(**__UpperCamelCase ) lowercase_ : int = add_prefix_space lowercase_ : Any = 'post_processor' lowercase_ : Optional[Any] = getattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) if tokenizer_component_instance: lowercase_ : Tuple = json.loads(tokenizer_component_instance.__getstate__() ) # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class` if "sep" in state: lowercase_ : str = tuple(state['sep'] ) if "cls" in state: lowercase_ : Union[str, Any] = tuple(state['cls'] ) lowercase_ : str = False if state.get('add_prefix_space' ,__UpperCamelCase ) != add_prefix_space: lowercase_ : Dict = add_prefix_space lowercase_ : int = True if state.get('trim_offsets' ,__UpperCamelCase ) != trim_offsets: lowercase_ : Optional[Any] = trim_offsets lowercase_ : Tuple = True if changes_to_apply: lowercase_ : Union[str, Any] = getattr(__UpperCamelCase ,state.pop('type' ) ) lowercase_ : Union[str, Any] = component_class(**__UpperCamelCase ) setattr(self.backend_tokenizer ,__UpperCamelCase ,__UpperCamelCase ) @property # Copied from 
transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot def _UpperCAmelCase ( self ) -> str: '''simple docstring''' if self._mask_token is None: if self.verbose: logger.error('Using mask_token, but it is not set yet.' ) return None return str(self._mask_token ) @mask_token.setter def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' lowercase_ : Any = AddedToken(__UpperCamelCase ,lstrip=__UpperCamelCase ,rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase ,__UpperCamelCase ) else value lowercase_ : str = value def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : Optional[int] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,*__UpperCamelCase ,**__UpperCamelCase ) -> BatchEncoding: '''simple docstring''' lowercase_ : List[str] = kwargs.get('is_split_into_words' ,__UpperCamelCase ) assert self.add_prefix_space or not is_split_into_words, ( f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True ''' "to use it with pretokenized inputs." ) return super()._encode_plus(*__UpperCamelCase ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Tuple[str]: '''simple docstring''' lowercase_ : Any = self._tokenizer.model.save(__UpperCamelCase ,name=__UpperCamelCase ) return tuple(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> List[int]: '''simple docstring''' lowercase_ : int = [self.sep_token_id] lowercase_ : List[str] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase = None ) -> Any: '''simple docstring''' return token_ids_a + [self.eos_token_id] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[int]: '''simple docstring''' lowercase_ : Optional[Any] = [] for is_user, text in conversation.iter_texts(): if is_user: # We need to space prefix as it's being done within blenderbot inputs.append(' ' + text ) else: # Generated responses should contain them already. inputs.append(__UpperCamelCase ) lowercase_ : Dict = ' '.join(__UpperCamelCase ) lowercase_ : str = self.encode(__UpperCamelCase ) if len(__UpperCamelCase ) > self.model_max_length: lowercase_ : List[str] = input_ids[-self.model_max_length :] logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' ) return input_ids
321
1
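As a sanity check of the Gauss Easter computation in the sample above, the intermediate values for 2023 work out to days_to_add = 15 and days_from_phm_to_sunday = 3, so Easter falls on March 22 + 18 days = April 9:

import math
from datetime import datetime, timedelta

year = 2023
metonic_cycle = year % 19                                   # 9
julian_leap_year = year % 4                                 # 3
non_leap_year = year % 7                                    # 0
leap_day_inhibits = math.floor(year / 100)                  # 20
lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)  # 6
leap_day_reinstall_number = leap_day_inhibits / 4           # 5.0
secular_moon_shift = (
    15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
) % 30                                                      # 24.0
century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7  # 5.0
days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30                      # 15.0
days_from_phm_to_sunday = (
    2 * julian_leap_year + 4 * non_leap_year + 6 * days_to_add + century_starting_point
) % 7                                                       # 3.0
easter = datetime(year, 3, 22) + timedelta(days=int(days_to_add + days_from_phm_to_sunday))
assert easter == datetime(2023, 4, 9)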
"""simple docstring""" import sys from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers __SCREAMING_SNAKE_CASE ="python tqdm regex requests packaging filelock numpy tokenizers".split() if sys.version_info < (3, 7): pkgs_to_check_at_runtime.append("dataclasses") if sys.version_info < (3, 8): pkgs_to_check_at_runtime.append("importlib_metadata") for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py") def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : str=None ): require_version(deps[pkg] , __SCREAMING_SNAKE_CASE )
321
"""simple docstring""" import os import sys import unittest __SCREAMING_SNAKE_CASE =os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, "utils")) import get_test_info # noqa: E402 from get_test_info import ( # noqa: E402 get_model_to_test_mapping, get_model_to_tester_mapping, get_test_to_tester_mapping, ) __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "bert", "test_modeling_bert.py") __SCREAMING_SNAKE_CASE =os.path.join("tests", "models", "blip", "test_modeling_blip.py") class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Tuple = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : Optional[int] = get_test_to_tester_mapping(__UpperCamelCase ) lowercase_ : List[str] = {'BertModelTest': 'BertModelTester'} lowercase_ : Union[str, Any] = { 'BlipModelTest': 'BlipModelTester', 'BlipTextImageModelTest': 'BlipTextImageModelsModelTester', 'BlipTextModelTest': 'BlipTextModelTester', 'BlipTextRetrievalModelTest': 'BlipTextRetrievalModelTester', 'BlipVQAModelTest': 'BlipVQAModelTester', 'BlipVisionModelTest': 'BlipVisionModelTester', } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[Any]: '''simple docstring''' lowercase_ : Optional[Any] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : List[str] = get_model_to_test_mapping(__UpperCamelCase ) lowercase_ : Any = { 'BertForMaskedLM': ['BertModelTest'], 'BertForMultipleChoice': ['BertModelTest'], 'BertForNextSentencePrediction': ['BertModelTest'], 'BertForPreTraining': ['BertModelTest'], 'BertForQuestionAnswering': ['BertModelTest'], 'BertForSequenceClassification': ['BertModelTest'], 'BertForTokenClassification': ['BertModelTest'], 'BertLMHeadModel': ['BertModelTest'], 'BertModel': ['BertModelTest'], } lowercase_ : Any = { 'BlipForConditionalGeneration': ['BlipTextImageModelTest'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTest'], 'BlipForQuestionAnswering': ['BlipVQAModelTest'], 'BlipModel': ['BlipModelTest'], 'BlipTextModel': ['BlipTextModelTest'], 'BlipVisionModel': ['BlipVisionModelTest'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Any: '''simple docstring''' lowercase_ : List[str] = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Dict = get_model_to_tester_mapping(__UpperCamelCase ) lowercase_ : Tuple = { 'BertForMaskedLM': ['BertModelTester'], 'BertForMultipleChoice': ['BertModelTester'], 'BertForNextSentencePrediction': ['BertModelTester'], 'BertForPreTraining': ['BertModelTester'], 'BertForQuestionAnswering': ['BertModelTester'], 'BertForSequenceClassification': ['BertModelTester'], 'BertForTokenClassification': ['BertModelTester'], 'BertLMHeadModel': ['BertModelTester'], 'BertModel': ['BertModelTester'], } lowercase_ : Optional[Any] = { 'BlipForConditionalGeneration': ['BlipTextImageModelsModelTester'], 'BlipForImageTextRetrieval': ['BlipTextRetrievalModelTester'], 'BlipForQuestionAnswering': ['BlipVQAModelTester'], 'BlipModel': ['BlipModelTester'], 'BlipTextModel': ['BlipTextModelTester'], 'BlipVisionModel': ['BlipVisionModelTester'], } self.assertEqual(get_test_info.to_json(__UpperCamelCase ) ,__UpperCamelCase ) self.assertEqual(get_test_info.to_json(__UpperCamelCase ) 
,__UpperCamelCase )
321
1
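The dependency check in the sample above ultimately compares each installed version against a pinned spec from the deps table. The real logic lives in transformers.utils.versions; a simplified stand-in using importlib.metadata and the packaging library (an illustrative sketch, not the library's actual implementation) could look like:

from importlib.metadata import version  # stdlib on Python >= 3.8
from packaging.requirements import Requirement

def simple_require_version(spec: str) -> None:
    # Raise if the installed package does not satisfy ``spec``, e.g. "tqdm>=4.27".
    req = Requirement(spec)
    installed = version(req.name)
    if not req.specifier.contains(installed, prereleases=True):
        raise ImportError(f"{req.name}=={installed} found but {spec} is required")

simple_require_version("packaging>=20.0")  # hypothetical pin, for illustration only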
"""simple docstring""" import argparse import collections import json from pathlib import Path import requests import torch import yaml from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileViTImageProcessor, MobileViTVaConfig, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, ) from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : List[str] ): print('Loading config file...' ) def flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str]="" , __SCREAMING_SNAKE_CASE : Union[str, Any]="." ): lowercase_ : Optional[Any] = [] for k, v in d.items(): lowercase_ : str = parent_key + sep + k if parent_key else k if isinstance(__SCREAMING_SNAKE_CASE , collections.abc.MutableMapping ): items.extend(flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , sep=__SCREAMING_SNAKE_CASE ).items() ) else: items.append((new_key, v) ) return dict(__SCREAMING_SNAKE_CASE ) lowercase_ : Dict = argparse.Namespace() with open(__SCREAMING_SNAKE_CASE , 'r' ) as yaml_file: try: lowercase_ : int = yaml.load(__SCREAMING_SNAKE_CASE , Loader=yaml.FullLoader ) lowercase_ : List[str] = flatten_yaml_as_dict(__SCREAMING_SNAKE_CASE ) for k, v in flat_cfg.items(): setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) except yaml.YAMLError as exc: logger.error('Error while loading config file: {}. Error message: {}'.format(__SCREAMING_SNAKE_CASE , str(__SCREAMING_SNAKE_CASE ) ) ) return config def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Optional[int] ): lowercase_ : Optional[Any] = MobileViTVaConfig() lowercase_ : Optional[int] = False # dataset if task_name.startswith('imagenet1k_' ): lowercase_ : int = 10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowercase_ : Optional[Any] = 3_84 else: lowercase_ : Dict = 2_56 lowercase_ : List[Any] = 'imagenet-1k-id2label.json' elif task_name.startswith('imagenet21k_to_1k_' ): lowercase_ : Optional[Any] = 2_10_00 if int(task_name.strip().split('_' )[-1] ) == 3_84: lowercase_ : Tuple = 3_84 else: lowercase_ : List[Any] = 2_56 lowercase_ : Optional[int] = 'imagenet-22k-id2label.json' elif task_name.startswith('ade20k_' ): lowercase_ : Tuple = 1_51 lowercase_ : str = 5_12 lowercase_ : Optional[int] = 'ade20k-id2label.json' lowercase_ : Optional[int] = True elif task_name.startswith('voc_' ): lowercase_ : Any = 21 lowercase_ : Optional[Any] = 5_12 lowercase_ : Dict = 'pascal-voc-id2label.json' lowercase_ : Tuple = True # orig_config lowercase_ : List[str] = load_orig_config_file(__SCREAMING_SNAKE_CASE ) assert getattr(__SCREAMING_SNAKE_CASE , 'model.classification.name' , -1 ) == "mobilevit_v2", "Invalid model" lowercase_ : int = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.width_multiplier' , 1.0 ) assert ( getattr(__SCREAMING_SNAKE_CASE , 'model.classification.mitv2.attn_norm_layer' , -1 ) == "layer_norm_2d" ), "Norm layers other than layer_norm_2d is not supported" lowercase_ : Union[str, Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.classification.activation.name' , 'swish' ) # config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256) if is_segmentation_model: lowercase_ : List[str] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.output_stride' , 16 ) if "_deeplabv3" in task_name: lowercase_ : Optional[int] = getattr(__SCREAMING_SNAKE_CASE , 
'model.segmentation.deeplabv3.aspp_rates' , [12, 24, 36] ) lowercase_ : int = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_out_channels' , 5_12 ) lowercase_ : Optional[Any] = getattr(__SCREAMING_SNAKE_CASE , 'model.segmentation.deeplabv3.aspp_dropout' , 0.1 ) # id2label lowercase_ : List[str] = 'huggingface/label-files' lowercase_ : Optional[int] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) ) lowercase_ : Any = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} lowercase_ : int = idalabel lowercase_ : Optional[int] = {v: k for k, v in idalabel.items()} return config def lowercase__( __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : str ): lowercase_ : List[Any] = dct.pop(__SCREAMING_SNAKE_CASE ) lowercase_ : Union[str, Any] = val def lowercase__( __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any]=False ): if base_model: lowercase_ : Dict = '' else: lowercase_ : Optional[int] = 'mobilevitv2.' lowercase_ : Optional[int] = [] for k in state_dict.keys(): if k[:8] == "encoder.": lowercase_ : List[Any] = k[8:] else: lowercase_ : List[Any] = k if ".block." in k: lowercase_ : Optional[Any] = k_new.replace('.block.' , '.' ) if ".conv." in k: lowercase_ : List[str] = k_new.replace('.conv.' , '.convolution.' ) if ".norm." in k: lowercase_ : List[Any] = k_new.replace('.norm.' , '.normalization.' ) if "conv_1." in k: lowercase_ : str = k_new.replace('conv_1.' , F'''{model_prefix}conv_stem.''' ) for i in [1, 2]: if F'''layer_{i}.''' in k: lowercase_ : List[Any] = k_new.replace(F'''layer_{i}.''' , F'''{model_prefix}encoder.layer.{i-1}.layer.''' ) if ".exp_1x1." in k: lowercase_ : Dict = k_new.replace('.exp_1x1.' , '.expand_1x1.' ) if ".red_1x1." in k: lowercase_ : Any = k_new.replace('.red_1x1.' , '.reduce_1x1.' ) for i in [3, 4, 5]: if F'''layer_{i}.0.''' in k: lowercase_ : Union[str, Any] = k_new.replace(F'''layer_{i}.0.''' , F'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' ) if F'''layer_{i}.1.local_rep.0.''' in k: lowercase_ : int = k_new.replace(F'''layer_{i}.1.local_rep.0.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' ) if F'''layer_{i}.1.local_rep.1.''' in k: lowercase_ : Any = k_new.replace(F'''layer_{i}.1.local_rep.1.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' ) for i in [3, 4, 5]: if i == 3: lowercase_ : Dict = [0, 1] elif i == 4: lowercase_ : Optional[Any] = [0, 1, 2, 3] elif i == 5: lowercase_ : List[str] = [0, 1, 2] for j in j_in: if F'''layer_{i}.1.global_rep.{j}.''' in k: lowercase_ : int = k_new.replace( F'''layer_{i}.1.global_rep.{j}.''' , F'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' ) if F'''layer_{i}.1.global_rep.{j+1}.''' in k: lowercase_ : List[str] = k_new.replace( F'''layer_{i}.1.global_rep.{j+1}.''' , F'''{model_prefix}encoder.layer.{i-1}.layernorm.''' ) if F'''layer_{i}.1.conv_proj.''' in k: lowercase_ : Optional[int] = k_new.replace(F'''layer_{i}.1.conv_proj.''' , F'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' ) if "pre_norm_attn.0." in k: lowercase_ : Any = k_new.replace('pre_norm_attn.0.' , 'layernorm_before.' ) if "pre_norm_attn.1." in k: lowercase_ : Optional[Any] = k_new.replace('pre_norm_attn.1.' , 'attention.' ) if "pre_norm_ffn.0." in k: lowercase_ : Union[str, Any] = k_new.replace('pre_norm_ffn.0.' , 'layernorm_after.' ) if "pre_norm_ffn.1." in k: lowercase_ : Optional[Any] = k_new.replace('pre_norm_ffn.1.' , 'ffn.conv1.' 
) if "pre_norm_ffn.3." in k: lowercase_ : Dict = k_new.replace('pre_norm_ffn.3.' , 'ffn.conv2.' ) if "classifier.1." in k: lowercase_ : str = k_new.replace('classifier.1.' , 'classifier.' ) if "seg_head." in k: lowercase_ : Optional[Any] = k_new.replace('seg_head.' , 'segmentation_head.' ) if ".aspp_layer." in k: lowercase_ : List[str] = k_new.replace('.aspp_layer.' , '.' ) if ".aspp_pool." in k: lowercase_ : Optional[Any] = k_new.replace('.aspp_pool.' , '.' ) rename_keys.append((k, k_new) ) return rename_keys def lowercase__( __SCREAMING_SNAKE_CASE : Dict ): lowercase_ : Optional[int] = [] for k in state_dict.keys(): if k.startswith('seg_head.aux_head.' ): keys_to_ignore.append(__SCREAMING_SNAKE_CASE ) for k in keys_to_ignore: state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowercase__( ): lowercase_ : Dict = 'http://images.cocodataset.org/val2017/000000039769.jpg' # url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg" lowercase_ : Optional[int] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowercase__( __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any ): lowercase_ : int = get_mobilevitva_config(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # load original state_dict lowercase_ : List[Any] = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' ) # load huggingface model if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ): lowercase_ : str = MobileViTVaForSemanticSegmentation(__SCREAMING_SNAKE_CASE ).eval() lowercase_ : Optional[Any] = False else: lowercase_ : Tuple = MobileViTVaForImageClassification(__SCREAMING_SNAKE_CASE ).eval() lowercase_ : int = False # remove and rename some keys of load the original model lowercase_ : Tuple = checkpoint remove_unused_keys(__SCREAMING_SNAKE_CASE ) lowercase_ : Optional[int] = create_rename_keys(__SCREAMING_SNAKE_CASE , base_model=__SCREAMING_SNAKE_CASE ) for rename_key_src, rename_key_dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # load modified state_dict model.load_state_dict(__SCREAMING_SNAKE_CASE ) # Check outputs on an image, prepared by MobileViTImageProcessor lowercase_ : List[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 ) lowercase_ : Dict = image_processor(images=prepare_img() , return_tensors='pt' ) lowercase_ : str = model(**__SCREAMING_SNAKE_CASE ) # verify classification model if task_name.startswith('imagenet' ): lowercase_ : int = outputs.logits lowercase_ : Any = logits.argmax(-1 ).item() print('Predicted class:' , model.config.idalabel[predicted_class_idx] ) if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0: # expected_logits for base variant lowercase_ : Optional[int] = torch.tensor([-1.6_336E00, -7.3_204E-02, -5.1_883E-01] ) assert torch.allclose(logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) print(F'''Saving model {task_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": __SCREAMING_SNAKE_CASE =argparse.ArgumentParser() # Required parameters parser.add_argument( "--task", 
default="imagenet1k_256", type=str, help=( "Name of the task for which the MobileViTV2 model you'd like to convert is trained on . " "\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n " ), choices=[ "imagenet1k_256", "imagenet1k_384", "imagenet21k_to_1k_256", "imagenet21k_to_1k_384", "ade20k_deeplabv3", "voc_deeplabv3", ], ) parser.add_argument( "--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)." ) parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.") parser.add_argument( "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory." ) __SCREAMING_SNAKE_CASE =parser.parse_args() convert_mobilevitva_checkpoint( args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path )
321
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! # #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def lowercase__( *__SCREAMING_SNAKE_CASE : Tuple ): with open(__SCREAMING_SNAKE_CASE , 'r' ) as fh: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_EX ) try: print(*__SCREAMING_SNAKE_CASE ) finally: fcntl.flock(__SCREAMING_SNAKE_CASE , fcntl.LOCK_UN ) __SCREAMING_SNAKE_CASE =int(os.environ["LOCAL_RANK"]) torch.cuda.set_device(local_rank) __SCREAMING_SNAKE_CASE =torch.device("cuda", local_rank) __SCREAMING_SNAKE_CASE =socket.gethostname() __SCREAMING_SNAKE_CASE =F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group("nccl") dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __SCREAMING_SNAKE_CASE =dist.get_rank() __SCREAMING_SNAKE_CASE =dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
321
1
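The checkpoint-conversion sample above renames state-dict keys by popping the old entry and re-inserting it under the new name. A toy run of that pop-and-reassign mechanic with two of its substitution rules (toy keys, not real MobileViTV2 weights):

# Toy demonstration of the pop-and-reassign renaming used above.
state_dict = {"encoder.layer_1.conv.weight": 1, "encoder.layer_1.norm.bias": 2}

def rename_key(dct, old, new):
    dct[new] = dct.pop(old)

for key in list(state_dict):
    new_key = key.replace(".conv.", ".convolution.").replace(".norm.", ".normalization.")
    if new_key != key:
        rename_key(state_dict, key, new_key)

assert set(state_dict) == {
    "encoder.layer_1.convolution.weight",
    "encoder.layer_1.normalization.bias",
}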
"""simple docstring""" import unittest from knapsack import knapsack as k class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = 0 lowercase_ : Dict = [0] lowercase_ : List[str] = [0] lowercase_ : List[str] = len(__UpperCamelCase ) self.assertEqual(k.knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,0 ) lowercase_ : Union[str, Any] = [60] lowercase_ : Dict = [10] lowercase_ : List[str] = len(__UpperCamelCase ) self.assertEqual(k.knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,0 ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : int = 3 lowercase_ : List[str] = [1, 2, 3] lowercase_ : Union[str, Any] = [3, 2, 1] lowercase_ : Optional[int] = len(__UpperCamelCase ) self.assertEqual(k.knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,5 ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : Tuple = 50 lowercase_ : int = [60, 100, 120] lowercase_ : Tuple = [10, 20, 30] lowercase_ : Union[str, Any] = len(__UpperCamelCase ) self.assertEqual(k.knapsack(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) ,220 ) if __name__ == "__main__": unittest.main()
321
"""simple docstring""" class UpperCamelCase : def __init__( self ,__UpperCamelCase ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : List[Any] = name lowercase_ : int = val def __str__( self ) -> Tuple: '''simple docstring''' return f'''{self.__class__.__name__}({self.name}, {self.val})''' def __lt__( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return self.val < other.val class UpperCamelCase : def __init__( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' lowercase_ : Optional[int] = {} lowercase_ : Tuple = {} lowercase_ : Union[str, Any] = self.build_heap(__UpperCamelCase ) def __getitem__( self ,__UpperCamelCase ) -> int: '''simple docstring''' return self.get_value(__UpperCamelCase ) def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' return (idx - 1) // 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Optional[Any]: '''simple docstring''' return idx * 2 + 1 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Tuple: '''simple docstring''' return idx * 2 + 2 def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[Any]: '''simple docstring''' return self.heap_dict[key] def _UpperCAmelCase ( self ,__UpperCamelCase ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = len(__UpperCamelCase ) - 1 lowercase_ : Optional[int] = self.get_parent_idx(__UpperCamelCase ) for idx, i in enumerate(__UpperCamelCase ): lowercase_ : Any = idx lowercase_ : str = i.val for i in range(__UpperCamelCase ,-1 ,-1 ): self.sift_down(__UpperCamelCase ,__UpperCamelCase ) return array def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> Tuple: '''simple docstring''' while True: lowercase_ : List[str] = self.get_left_child_idx(__UpperCamelCase ) # noqa: E741 lowercase_ : List[str] = self.get_right_child_idx(__UpperCamelCase ) lowercase_ : List[str] = idx if l < len(__UpperCamelCase ) and array[l] < array[idx]: lowercase_ : List[str] = l if r < len(__UpperCamelCase ) and array[r] < array[smallest]: lowercase_ : Dict = r if smallest != idx: lowercase_ , lowercase_ : Union[str, Any] = array[smallest], array[idx] ( ( lowercase_ ) , ( lowercase_ ) , ) : str = ( self.idx_of_element[array[smallest]], self.idx_of_element[array[idx]], ) lowercase_ : Any = smallest else: break def _UpperCAmelCase ( self ,__UpperCamelCase ) -> int: '''simple docstring''' lowercase_ : Dict = self.get_parent_idx(__UpperCamelCase ) while p >= 0 and self.heap[p] > self.heap[idx]: lowercase_ , lowercase_ : Any = self.heap[idx], self.heap[p] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[idx]], self.idx_of_element[self.heap[p]], ) lowercase_ : int = p lowercase_ : str = self.get_parent_idx(__UpperCamelCase ) def _UpperCAmelCase ( self ) -> str: '''simple docstring''' return self.heap[0] def _UpperCAmelCase ( self ) -> Optional[int]: '''simple docstring''' lowercase_ , lowercase_ : Optional[Any] = self.heap[-1], self.heap[0] lowercase_ , lowercase_ : Tuple = ( self.idx_of_element[self.heap[-1]], self.idx_of_element[self.heap[0]], ) lowercase_ : Tuple = self.heap.pop() del self.idx_of_element[x] self.sift_down(0 ,self.heap ) return x def _UpperCAmelCase ( self ,__UpperCamelCase ) -> Dict: '''simple docstring''' self.heap.append(__UpperCamelCase ) lowercase_ : Tuple = len(self.heap ) - 1 lowercase_ : Optional[int] = node.val self.sift_up(len(self.heap ) - 1 ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' return len(self.heap ) == 0 def _UpperCAmelCase ( self ,__UpperCamelCase ,__UpperCamelCase ) -> List[Any]: 
'''simple docstring''' assert ( self.heap[self.idx_of_element[node]].val > new_value ), "newValue must be less than current value" lowercase_ : Any = new_value lowercase_ : List[str] = new_value self.sift_up(self.idx_of_element[node] ) __SCREAMING_SNAKE_CASE =Node("R", -1) __SCREAMING_SNAKE_CASE =Node("B", 6) __SCREAMING_SNAKE_CASE =Node("A", 3) __SCREAMING_SNAKE_CASE =Node("X", 1) __SCREAMING_SNAKE_CASE =Node("E", 4) # Use one of these two ways to generate Min-Heap # Generating Min-Heap from array __SCREAMING_SNAKE_CASE =MinHeap([r, b, a, x, e]) # Generating Min-Heap by Insert method # myMinHeap.insert(a) # myMinHeap.insert(b) # myMinHeap.insert(x) # myMinHeap.insert(r) # myMinHeap.insert(e) # Before print("Min Heap - before decrease key") for i in my_min_heap.heap: print(i) print("Min Heap - After decrease key of node [B -> -17]") my_min_heap.decrease_key(b, -17) # After for i in my_min_heap.heap: print(i) if __name__ == "__main__": import doctest doctest.testmod()
321
1
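The knapsack tests above call k.knapsack(capacity, weights, values, counter), but the module itself is not part of this sample. A recursive 0/1 knapsack consistent with the expected results (a sketch, not necessarily the tested implementation):

def knapsack(capacity, weights, values, counter):
    # Base case: no items left, or no remaining capacity.
    if counter == 0 or capacity == 0:
        return 0
    # If the current item is too heavy, it can only be skipped.
    if weights[counter - 1] > capacity:
        return knapsack(capacity, weights, values, counter - 1)
    # Otherwise take the better of including or excluding the item.
    return max(
        values[counter - 1]
        + knapsack(capacity - weights[counter - 1], weights, values, counter - 1),
        knapsack(capacity, weights, values, counter - 1),
    )

assert knapsack(50, [10, 20, 30], [60, 100, 120], 3) == 220  # matches the test above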
"""simple docstring""" from __future__ import annotations from math import pow, sqrt def lowercase__( __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float , __SCREAMING_SNAKE_CASE : float ): if (resistance, reactance, impedance).count(0 ) != 1: raise ValueError('One and only one argument must be 0' ) if resistance == 0: return {"resistance": sqrt(pow(__SCREAMING_SNAKE_CASE , 2 ) - pow(__SCREAMING_SNAKE_CASE , 2 ) )} elif reactance == 0: return {"reactance": sqrt(pow(__SCREAMING_SNAKE_CASE , 2 ) - pow(__SCREAMING_SNAKE_CASE , 2 ) )} elif impedance == 0: return {"impedance": sqrt(pow(__SCREAMING_SNAKE_CASE , 2 ) + pow(__SCREAMING_SNAKE_CASE , 2 ) )} else: raise ValueError('Exactly one argument must be 0' ) if __name__ == "__main__": import doctest doctest.testmod()
321
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCamelCase ( unittest.TestCase ): def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : List[Any] = tempfile.mkdtemp() # fmt: off lowercase_ : Any = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>'] # fmt: on lowercase_ : int = dict(zip(__UpperCamelCase ,range(len(__UpperCamelCase ) ) ) ) lowercase_ : Union[str, Any] = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', ''] lowercase_ : Tuple = {'unk_token': '<unk>'} lowercase_ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] ) lowercase_ : int = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] ) with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp: fp.write(json.dumps(__UpperCamelCase ) + '\n' ) with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp: fp.write('\n'.join(__UpperCamelCase ) ) lowercase_ : Any = { 'do_resize': True, 'size': 20, 'do_center_crop': True, 'crop_size': 18, 'do_normalize': True, 'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073], 'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711], } lowercase_ : List[str] = os.path.join(self.tmpdirname ,__UpperCamelCase ) with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp: json.dump(__UpperCamelCase ,__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Optional[int]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ,**__UpperCamelCase ) -> str: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname ,**__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def _UpperCAmelCase ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ : Dict = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )] lowercase_ : List[str] = [Image.fromarray(np.moveaxis(__UpperCamelCase ,0 ,-1 ) ) for x in image_inputs] return image_inputs def _UpperCAmelCase ( self ) -> str: '''simple docstring''' lowercase_ : Optional[int] = self.get_tokenizer() lowercase_ : List[Any] = self.get_rust_tokenizer() lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ : Union[str, Any] = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=__UpperCamelCase ) lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ : str = CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() ) 
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer ,__UpperCamelCase ) self.assertIsInstance(processor_fast.tokenizer ,__UpperCamelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor ,__UpperCamelCase ) self.assertIsInstance(processor_fast.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Optional[int] = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ : List[Any] = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' ) lowercase_ : Any = self.get_image_processor(do_normalize=__UpperCamelCase ,padding_value=1.0 ) lowercase_ : Any = CLIPSegProcessor.from_pretrained( self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=__UpperCamelCase ,padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer ,__UpperCamelCase ) self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor ,__UpperCamelCase ) def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[str] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = self.prepare_image_inputs() lowercase_ : str = image_processor(__UpperCamelCase ,return_tensors='np' ) lowercase_ : Union[str, Any] = processor(images=__UpperCamelCase ,return_tensors='np' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 ) def _UpperCAmelCase ( self ) -> List[str]: '''simple docstring''' lowercase_ : Dict = self.get_image_processor() lowercase_ : List[Any] = self.get_tokenizer() lowercase_ : List[Any] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Dict = 'lower newer' lowercase_ : Any = processor(text=__UpperCamelCase ) lowercase_ : int = tokenizer(__UpperCamelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] ,encoded_processor[key] ) def _UpperCAmelCase ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ : str = self.get_image_processor() lowercase_ : str = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : List[Any] = 'lower newer' lowercase_ : str = self.prepare_image_inputs() lowercase_ : Optional[int] = processor(text=__UpperCamelCase ,images=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Tuple: '''simple docstring''' lowercase_ : Tuple = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : List[str] = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = 
self.prepare_image_inputs() lowercase_ : Optional[Any] = self.prepare_image_inputs() lowercase_ : int = processor(images=__UpperCamelCase ,visual_prompt=__UpperCamelCase ) self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] ) # test if it raises when no input is passed with pytest.raises(__UpperCamelCase ): processor() def _UpperCAmelCase ( self ) -> Dict: '''simple docstring''' lowercase_ : List[str] = self.get_image_processor() lowercase_ : Optional[Any] = self.get_tokenizer() lowercase_ : int = CLIPSegProcessor(tokenizer=__UpperCamelCase ,image_processor=__UpperCamelCase ) lowercase_ : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ : List[str] = processor.batch_decode(__UpperCamelCase ) lowercase_ : Optional[Any] = tokenizer.batch_decode(__UpperCamelCase ) self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
321
1
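The impedance helper above expects exactly one zero argument and solves for it via the Pythagorean relation Z^2 = R^2 + X^2. A worked example with R = 3 and X = 4:

from math import sqrt

resistance, reactance = 3.0, 4.0
impedance = sqrt(resistance**2 + reactance**2)  # solve for Z when it is the zero argument
assert impedance == 5.0
# Inverse direction: given Z = 5 and X = 4, R = sqrt(5**2 - 4**2) = 3.
assert sqrt(impedance**2 - reactance**2) == resistance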
"""simple docstring""" import os from pickle import UnpicklingError from typing import Dict, Tuple import jax import jax.numpy as jnp import numpy as np from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict import transformers from .utils import logging __SCREAMING_SNAKE_CASE =logging.get_logger(__name__) def lowercase__( __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any=False ): try: import torch # noqa: F401 except ImportError: logger.error( 'Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see' ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation' ' instructions.' ) raise if not is_sharded: lowercase_ : Optional[Any] = os.path.abspath(__SCREAMING_SNAKE_CASE ) logger.info(F'''Loading PyTorch weights from {pt_path}''' ) lowercase_ : Optional[int] = torch.load(__SCREAMING_SNAKE_CASE , map_location='cpu' ) logger.info(F'''PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.''' ) lowercase_ : Optional[Any] = convert_pytorch_state_dict_to_flax(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else: # model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files lowercase_ : Dict = convert_pytorch_sharded_state_dict_to_flax(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) return flax_state_dict def lowercase__( __SCREAMING_SNAKE_CASE : Tuple[str] , __SCREAMING_SNAKE_CASE : np.ndarray , __SCREAMING_SNAKE_CASE : Dict[str, jnp.ndarray] , __SCREAMING_SNAKE_CASE : str , ): def is_key_or_prefix_key_in_dict(__SCREAMING_SNAKE_CASE : Tuple[str] ) -> bool: return len(set(__SCREAMING_SNAKE_CASE ) & {key, (model_prefix,) + key} ) > 0 # layer norm lowercase_ : Union[str, Any] = pt_tuple_key[:-1] + ('scale',) if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(__SCREAMING_SNAKE_CASE ): return renamed_pt_tuple_key, pt_tensor # batch norm layer mean lowercase_ : Union[str, Any] = pt_tuple_key[:-1] + ('mean',) if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(__SCREAMING_SNAKE_CASE ): return renamed_pt_tuple_key, pt_tensor # batch norm layer var lowercase_ : Dict = pt_tuple_key[:-1] + ('var',) if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(__SCREAMING_SNAKE_CASE ): return renamed_pt_tuple_key, pt_tensor # embedding lowercase_ : List[Any] = pt_tuple_key[:-1] + ('embedding',) if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(__SCREAMING_SNAKE_CASE ): return renamed_pt_tuple_key, pt_tensor # conv layer lowercase_ : Any = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = pt_tensor.transpose(2 , 3 , 1 , 0 ) return renamed_pt_tuple_key, pt_tensor # linear layer lowercase_ : List[Any] = pt_tuple_key[:-1] + ('kernel',) if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(__SCREAMING_SNAKE_CASE ): lowercase_ : Dict = pt_tensor.T return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm weight lowercase_ : Tuple = pt_tuple_key[:-1] + ('weight',) if pt_tuple_key[-1] == "gamma": return renamed_pt_tuple_key, pt_tensor # old PyTorch layer norm bias lowercase_ : Dict = pt_tuple_key[:-1] + ('bias',) if pt_tuple_key[-1] == "beta": return renamed_pt_tuple_key, pt_tensor # New `weight_norm` from 
    # https://github.com/huggingface/transformers/pull/24030
    lowercase_ : Optional[int] = None
    if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
        lowercase_ : Optional[Any] = pt_tuple_key[-2] + '_g'
    elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
        lowercase_ : List[str] = pt_tuple_key[-2] + '_v'
    if name is not None:
        lowercase_ : Dict = pt_tuple_key[:-3] + (name,)
        return renamed_pt_tuple_key, pt_tensor

    return pt_tuple_key, pt_tensor


def lowercase__( __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Tuple ):
    # convert pytorch tensor to numpy
    lowercase_ : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}

    lowercase_ : List[Any] = flax_model.base_model_prefix

    # use params dict if the model contains batch norm layers
    if "params" in flax_model.params:
        lowercase_ : Union[str, Any] = flax_model.params['params']
    else:
        lowercase_ : Optional[int] = flax_model.params
    lowercase_ : Tuple = flatten_dict(__SCREAMING_SNAKE_CASE )

    # add batch_stats keys and values to the dict
    if "batch_stats" in flax_model.params:
        lowercase_ : Any = flatten_dict(flax_model.params['batch_stats'] )
        random_flax_state_dict.update(__SCREAMING_SNAKE_CASE )

    lowercase_ : Tuple = {}

    lowercase_ : Optional[int] = (model_prefix not in flax_model_params) and (
        model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
    )
    lowercase_ : int = (model_prefix in flax_model_params) and (
        model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
    )

    # Need to change some parameter names to match Flax names
    for pt_key, pt_tensor in pt_state_dict.items():
        lowercase_ : Tuple = tuple(pt_key.split('.' ) )

        # remove base model prefix if necessary
        lowercase_ : Dict = pt_tuple_key[0] == model_prefix
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowercase_ : List[Any] = pt_tuple_key[1:]

        # Correctly rename weight parameters
        lowercase_ , lowercase_ : Optional[int] = rename_key_and_reshape_tensor(
            __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

        # add model prefix if necessary
        lowercase_ : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
        if load_base_model_into_model_with_head and require_base_model_prefix:
            lowercase_ : Tuple = (model_prefix,) + flax_key

        if flax_key in random_flax_state_dict:
            if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                raise ValueError(
                    F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                    F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

        # add batch stats if the model contains batchnorm layers
        if "batch_stats" in flax_model.params:
            if "mean" in flax_key[-1] or "var" in flax_key[-1]:
                lowercase_ : Union[str, Any] = jnp.asarray(__SCREAMING_SNAKE_CASE )
                continue
            # remove num_batches_tracked key
            if "num_batches_tracked" in flax_key[-1]:
                flax_state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                continue
            # also add unexpected weight so that warning is thrown
            lowercase_ : Dict = jnp.asarray(__SCREAMING_SNAKE_CASE )
        else:
            # also add unexpected weight so that warning is thrown
            lowercase_ : Optional[int] = jnp.asarray(__SCREAMING_SNAKE_CASE )

    return unflatten_dict(__SCREAMING_SNAKE_CASE )


def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int ):
    import torch

    # Load the index
    lowercase_ : Dict = {}
    for shard_file in shard_filenames:
        # load using msgpack utils
        lowercase_ : Any = torch.load(__SCREAMING_SNAKE_CASE )
        lowercase_ : Tuple = {k: v.numpy() for k, v in pt_state_dict.items()}

        lowercase_ : Optional[Any] = flax_model.base_model_prefix

        # use params dict if the model contains batch norm layers and then add batch_stats keys and values to the dict
        if "batch_stats" in flax_model.params:
            lowercase_ : List[Any] = flax_model.params['params']

            lowercase_ : int = flatten_dict(__SCREAMING_SNAKE_CASE )
            random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
        else:
            lowercase_ : int = flax_model.params
            lowercase_ : Tuple = flatten_dict(__SCREAMING_SNAKE_CASE )

        lowercase_ : Dict = (model_prefix not in flax_model_params) and (
            model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
        )
        lowercase_ : Tuple = (model_prefix in flax_model_params) and (
            model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
        )

        # Need to change some parameter names to match Flax names
        for pt_key, pt_tensor in pt_state_dict.items():
            lowercase_ : Optional[Any] = tuple(pt_key.split('.' ) )

            # remove base model prefix if necessary
            lowercase_ : Tuple = pt_tuple_key[0] == model_prefix
            if load_model_with_head_into_base_model and has_base_model_prefix:
                lowercase_ : int = pt_tuple_key[1:]

            # Correctly rename weight parameters
            lowercase_ , lowercase_ : int = rename_key_and_reshape_tensor(
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )

            # add model prefix if necessary
            lowercase_ : Optional[int] = (model_prefix,) + flax_key in random_flax_state_dict
            if load_base_model_into_model_with_head and require_base_model_prefix:
                lowercase_ : Dict = (model_prefix,) + flax_key

            if flax_key in random_flax_state_dict:
                if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
                    raise ValueError(
                        F'''PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '''
                        F'''{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )

                # add batch stats if the model contains batchnorm layers
                if "batch_stats" in flax_model.params:
                    if "mean" in flax_key[-1]:
                        lowercase_ : Dict = jnp.asarray(__SCREAMING_SNAKE_CASE )
                        continue
                    if "var" in flax_key[-1]:
                        lowercase_ : List[str] = jnp.asarray(__SCREAMING_SNAKE_CASE )
                        continue
                    # remove num_batches_tracked key
                    if "num_batches_tracked" in flax_key[-1]:
                        flax_state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
                        continue
                    # also add unexpected weight so that warning is thrown
                    lowercase_ : Tuple = jnp.asarray(__SCREAMING_SNAKE_CASE )
                else:
                    # also add unexpected weight so that warning is thrown
                    lowercase_ : List[Any] = jnp.asarray(__SCREAMING_SNAKE_CASE )
    return unflatten_dict(__SCREAMING_SNAKE_CASE )


def lowercase__( __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Any ):
    lowercase_ : Optional[int] = os.path.abspath(__SCREAMING_SNAKE_CASE )
    logger.info(F'''Loading Flax weights from {flax_checkpoint_path}''' )

    # import correct flax class
    lowercase_ : str = getattr(__SCREAMING_SNAKE_CASE , 'Flax' + model.__class__.__name__ )

    # load flax weight dict
    with open(__SCREAMING_SNAKE_CASE , 'rb' ) as state_f:
        try:
            lowercase_ : Optional[int] = from_bytes(__SCREAMING_SNAKE_CASE , state_f.read() )
        except UnpicklingError:
            raise EnvironmentError(F'''Unable to convert {flax_checkpoint_path} to Flax deserializable object. ''' )

    return load_flax_weights_in_pytorch_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )


def lowercase__( __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] ):
    try:
        import torch  # noqa: F401
    except ImportError:
        logger.error(
            'Loading Flax weights in PyTorch requires both PyTorch and Flax to be installed. Please see'
            ' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
            ' instructions.' )
        raise

    # check if we have bf16 weights
    lowercase_ : Any = flatten_dict(jax.tree_util.tree_map(lambda __SCREAMING_SNAKE_CASE : x.dtype == jnp.bfloataa , __SCREAMING_SNAKE_CASE ) ).values()
    if any(__SCREAMING_SNAKE_CASE ):
        # convert all weights to fp32 if they are bf16 since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
        logger.warning(
            'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
            'before loading those in PyTorch model.' )
        lowercase_ : Tuple = jax.tree_util.tree_map(
            lambda __SCREAMING_SNAKE_CASE : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , __SCREAMING_SNAKE_CASE )

    lowercase_ : Tuple = flatten_dict(__SCREAMING_SNAKE_CASE )
    lowercase_ : int = pt_model.state_dict()

    lowercase_ : Tuple = (pt_model.base_model_prefix in flax_state) and (
        pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
    )
    lowercase_ : Optional[int] = (pt_model.base_model_prefix not in flax_state) and (
        pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
    )

    # keep track of unexpected & missing keys
    lowercase_ : List[Any] = []
    lowercase_ : List[str] = set(pt_model_dict.keys() )

    for flax_key_tuple, flax_tensor in flax_state_dict.items():
        lowercase_ : Optional[Any] = flax_key_tuple[0] == pt_model.base_model_prefix
        lowercase_ : Tuple = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict

        # adapt flax_key to prepare for loading from/to base model only
        if load_model_with_head_into_base_model and has_base_model_prefix:
            lowercase_ : Optional[Any] = flax_key_tuple[1:]
        elif load_base_model_into_model_with_head and require_base_model_prefix:
            lowercase_ : int = (pt_model.base_model_prefix,) + flax_key_tuple

        # rename flax weights to PyTorch format
        if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(__SCREAMING_SNAKE_CASE ) not in pt_model_dict:
            # conv layer
            lowercase_ : str = flax_key_tuple[:-1] + ('weight',)
            lowercase_ : List[str] = jnp.transpose(__SCREAMING_SNAKE_CASE , (3, 2, 0, 1) )
        elif flax_key_tuple[-1] == "kernel" and ".".join(__SCREAMING_SNAKE_CASE ) not in pt_model_dict:
            # linear layer
            lowercase_ : List[Any] = flax_key_tuple[:-1] + ('weight',)
            lowercase_ : Optional[int] = flax_tensor.T
        elif flax_key_tuple[-1] in ["scale", "embedding"]:
            lowercase_ : Optional[Any] = flax_key_tuple[:-1] + ('weight',)

        # adding batch stats from flax batch norm to pt
        elif "mean" in flax_key_tuple[-1]:
            lowercase_ : Any = flax_key_tuple[:-1] + ('running_mean',)
        elif "var" in flax_key_tuple[-1]:
            lowercase_ : List[str] = flax_key_tuple[:-1] + ('running_var',)

        if "batch_stats" in flax_state:
            lowercase_ : List[str] = '.'.join(flax_key_tuple[1:] )  # Remove the params/batch_stats header
        else:
            lowercase_ : Any = '.'.join(__SCREAMING_SNAKE_CASE )

        # We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
        lowercase_ : Optional[int] = {}
        # New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
        for key in pt_model_dict:
            lowercase_ : Tuple = key.split('.' )
            lowercase_ : Union[str, Any] = None
            if key_components[-3::2] == ["parametrizations", "original0"]:
                lowercase_ : Optional[int] = key_components[-2] + '_g'
            elif key_components[-3::2] == ["parametrizations", "original1"]:
                lowercase_ : int = key_components[-2] + '_v'
            if name is not None:
                lowercase_ : Union[str, Any] = key_components[:-3] + [name]
                lowercase_ : Optional[int] = '.'.join(__SCREAMING_SNAKE_CASE )
                lowercase_ : str = key

        if flax_key in special_pt_names:
            lowercase_ : Tuple = special_pt_names[flax_key]

        if flax_key in pt_model_dict:
            if flax_tensor.shape != pt_model_dict[flax_key].shape:
                raise ValueError(
                    F'''Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '''
                    F'''to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.''' )
            else:
                # add weight to pytorch dict
                lowercase_ : Optional[int] = np.asarray(__SCREAMING_SNAKE_CASE ) if not isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) else flax_tensor
                lowercase_ : List[str] = torch.from_numpy(__SCREAMING_SNAKE_CASE )
                # remove from missing keys
                missing_keys.remove(__SCREAMING_SNAKE_CASE )
        else:
            # weight is not expected by PyTorch model
            unexpected_keys.append(__SCREAMING_SNAKE_CASE )

    pt_model.load_state_dict(__SCREAMING_SNAKE_CASE )

    # re-transform missing_keys to list
    lowercase_ : int = list(__SCREAMING_SNAKE_CASE )

    if len(__SCREAMING_SNAKE_CASE ) > 0:
        logger.warning(
            'Some weights of the Flax model were not used when initializing the PyTorch model'
            F''' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'''
            F''' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'''
            ' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
            F''' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'''
            ' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
            ' FlaxBertForSequenceClassification model).' )
    else:
        logger.warning(F'''All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n''' )

    if len(__SCREAMING_SNAKE_CASE ) > 0:
        logger.warning(
            F'''Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'''
            F''' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'''
            ' use it for predictions and inference.' )
    else:
        logger.warning(
            F'''All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'''
            'If your task is similar to the task the model of the checkpoint was trained on, '
            F'''you can already use {pt_model.__class__.__name__} for predictions without further training.''' )

    return pt_model
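# A minimal usage sketch for the loaders above. This is hypothetical: the obfuscation in
# this file renames every function to `lowercase__`, so the call below uses the upstream
# name `load_flax_checkpoint_in_pytorch_model` (which this file itself references) and an
# illustrative model class and checkpoint path; converting a serialized Flax checkpoint
# into an equivalent PyTorch model would then look roughly like:
#
#     from transformers import BertConfig, BertForSequenceClassification
#
#     pt_model = BertForSequenceClassification(BertConfig())
#     pt_model = load_flax_checkpoint_in_pytorch_model(pt_model, "path/to/flax_model.msgpack")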
321
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
321
1